diff --git a/deps/openssl/config/archs/BSD-x86/asm/configdata.pm b/deps/openssl/config/archs/BSD-x86/asm/configdata.pm index 765ef97df004ce..a6393bf6b558cf 100644 --- a/deps/openssl/config/archs/BSD-x86/asm/configdata.pm +++ b/deps/openssl/config/archs/BSD-x86/asm/configdata.pm @@ -62,7 +62,7 @@ our %config = ( options => "enable-ssl-trace no-afalgeng no-asan no-buildtest-c++ no-comp no-crypto-mdebug no-crypto-mdebug-backtrace no-dynamic-engine no-ec_nistp_64_gcc_128 no-egd no-external-tests no-fuzz-afl no-fuzz-libfuzzer no-heartbeats no-md2 no-msan no-rc5 no-sctp no-shared no-ssl3 no-ssl3-method no-ubsan no-unit-test no-weak-ssl-ciphers no-zlib no-zlib-dynamic", perl_archname => "x86_64-linux-gnu-thread-multi", perl_cmd => "/usr/bin/perl", - perl_version => "5.28.1", + perl_version => "5.26.1", perlargv => [ "no-comp", "no-shared", "no-afalgeng", "enable-ssl-trace", "BSD-x86" ], perlenv => { "AR" => undef, @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "BSD-x86", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -15447,3 +15447,4 @@ Verbose output. =back =cut + diff --git a/deps/openssl/config/archs/BSD-x86/asm/crypto/bf/bf-586.s b/deps/openssl/config/archs/BSD-x86/asm/crypto/bf/bf-586.s new file mode 100644 index 00000000000000..3e834e9b7b05a4 --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm/crypto/bf/bf-586.s @@ -0,0 +1,892 @@ +.text +.globl _BF_encrypt +.type _BF_encrypt,@function +.align 4 +_BF_encrypt: +L_BF_encrypt_begin: + + pushl %ebp + pushl %ebx + movl 12(%esp),%ebx + movl 16(%esp),%ebp + pushl %esi + pushl %edi + # Load the 2 words + movl (%ebx),%edi + movl 4(%ebx),%esi + xorl %eax,%eax + movl (%ebp),%ebx + xorl %ecx,%ecx + xorl %ebx,%edi + + # Round 0 + movl 4(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 1 + movl 8(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 2 + movl 12(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 3 + movl 16(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 4 + movl 20(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 
2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 5 + movl 24(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 6 + movl 28(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 7 + movl 32(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 8 + movl 36(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 9 + movl 40(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 10 + movl 44(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 11 + movl 48(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 12 + movl 52(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 13 + movl 56(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 14 + movl 60(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + 
movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 15 + movl 64(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + # Load parameter 0 (16) enc=1 + movl 20(%esp),%eax + xorl %ebx,%edi + movl 68(%ebp),%edx + xorl %edx,%esi + movl %edi,4(%eax) + movl %esi,(%eax) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.globl _BF_decrypt +.type _BF_decrypt,@function +.align 4 +_BF_decrypt: +L_BF_decrypt_begin: + + pushl %ebp + pushl %ebx + movl 12(%esp),%ebx + movl 16(%esp),%ebp + pushl %esi + pushl %edi + # Load the 2 words + movl (%ebx),%edi + movl 4(%ebx),%esi + xorl %eax,%eax + movl 68(%ebp),%ebx + xorl %ecx,%ecx + xorl %ebx,%edi + + # Round 16 + movl 64(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 15 + movl 60(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 14 + movl 56(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 13 + movl 52(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 12 + movl 48(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 11 + movl 44(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 10 + movl 40(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 
1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 9 + movl 36(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 8 + movl 32(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 7 + movl 28(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 6 + movl 24(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 5 + movl 20(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 4 + movl 16(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 3 + movl 12(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%edi + + # Round 2 + movl 8(%ebp),%edx + movl %edi,%ebx + xorl %edx,%esi + shrl $16,%ebx + movl %edi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + xorl %eax,%eax + xorl %ebx,%esi + + # Round 1 + movl 4(%ebp),%edx + movl %esi,%ebx + xorl %edx,%edi + shrl $16,%ebx + movl %esi,%edx + movb %bh,%al + andl $255,%ebx + movb %dh,%cl + andl $255,%edx + movl 72(%ebp,%eax,4),%eax + movl 1096(%ebp,%ebx,4),%ebx + addl %eax,%ebx + movl 2120(%ebp,%ecx,4),%eax + xorl %eax,%ebx + movl 3144(%ebp,%edx,4),%edx + addl %edx,%ebx + # Load parameter 0 (1) enc=0 + movl 20(%esp),%eax + xorl %ebx,%edi + movl (%ebp),%edx + xorl 
%edx,%esi + movl %edi,4(%eax) + movl %esi,(%eax) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.globl _BF_cbc_encrypt +.type _BF_cbc_encrypt,@function +.align 4 +_BF_cbc_encrypt: +L_BF_cbc_encrypt_begin: + + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + movl 28(%esp),%ebp + # getting iv ptr from parameter 4 + movl 36(%esp),%ebx + movl (%ebx),%esi + movl 4(%ebx),%edi + pushl %edi + pushl %esi + pushl %edi + pushl %esi + movl %esp,%ebx + movl 36(%esp),%esi + movl 40(%esp),%edi + # getting encrypt flag from parameter 5 + movl 56(%esp),%ecx + # get and push parameter 3 + movl 48(%esp),%eax + pushl %eax + pushl %ebx + cmpl $0,%ecx + jz L000decrypt + andl $4294967288,%ebp + movl 8(%esp),%eax + movl 12(%esp),%ebx + jz L001encrypt_finish +L002encrypt_loop: + movl (%esi),%ecx + movl 4(%esi),%edx + xorl %ecx,%eax + xorl %edx,%ebx + bswap %eax + bswap %ebx + movl %eax,8(%esp) + movl %ebx,12(%esp) + call L_BF_encrypt_begin + movl 8(%esp),%eax + movl 12(%esp),%ebx + bswap %eax + bswap %ebx + movl %eax,(%edi) + movl %ebx,4(%edi) + addl $8,%esi + addl $8,%edi + subl $8,%ebp + jnz L002encrypt_loop +L001encrypt_finish: + movl 52(%esp),%ebp + andl $7,%ebp + jz L003finish + call L004PIC_point +L004PIC_point: + popl %edx + leal L005cbc_enc_jmp_table-L004PIC_point(%edx),%ecx + movl (%ecx,%ebp,4),%ebp + addl %edx,%ebp + xorl %ecx,%ecx + xorl %edx,%edx + jmp *%ebp +L006ej7: + movb 6(%esi),%dh + shll $8,%edx +L007ej6: + movb 5(%esi),%dh +L008ej5: + movb 4(%esi),%dl +L009ej4: + movl (%esi),%ecx + jmp L010ejend +L011ej3: + movb 2(%esi),%ch + shll $8,%ecx +L012ej2: + movb 1(%esi),%ch +L013ej1: + movb (%esi),%cl +L010ejend: + xorl %ecx,%eax + xorl %edx,%ebx + bswap %eax + bswap %ebx + movl %eax,8(%esp) + movl %ebx,12(%esp) + call L_BF_encrypt_begin + movl 8(%esp),%eax + movl 12(%esp),%ebx + bswap %eax + bswap %ebx + movl %eax,(%edi) + movl %ebx,4(%edi) + jmp L003finish +L000decrypt: + andl $4294967288,%ebp + movl 16(%esp),%eax + movl 20(%esp),%ebx + jz L014decrypt_finish +L015decrypt_loop: + movl (%esi),%eax + movl 4(%esi),%ebx + bswap %eax + bswap %ebx + movl %eax,8(%esp) + movl %ebx,12(%esp) + call L_BF_decrypt_begin + movl 8(%esp),%eax + movl 12(%esp),%ebx + bswap %eax + bswap %ebx + movl 16(%esp),%ecx + movl 20(%esp),%edx + xorl %eax,%ecx + xorl %ebx,%edx + movl (%esi),%eax + movl 4(%esi),%ebx + movl %ecx,(%edi) + movl %edx,4(%edi) + movl %eax,16(%esp) + movl %ebx,20(%esp) + addl $8,%esi + addl $8,%edi + subl $8,%ebp + jnz L015decrypt_loop +L014decrypt_finish: + movl 52(%esp),%ebp + andl $7,%ebp + jz L003finish + movl (%esi),%eax + movl 4(%esi),%ebx + bswap %eax + bswap %ebx + movl %eax,8(%esp) + movl %ebx,12(%esp) + call L_BF_decrypt_begin + movl 8(%esp),%eax + movl 12(%esp),%ebx + bswap %eax + bswap %ebx + movl 16(%esp),%ecx + movl 20(%esp),%edx + xorl %eax,%ecx + xorl %ebx,%edx + movl (%esi),%eax + movl 4(%esi),%ebx +L016dj7: + rorl $16,%edx + movb %dl,6(%edi) + shrl $16,%edx +L017dj6: + movb %dh,5(%edi) +L018dj5: + movb %dl,4(%edi) +L019dj4: + movl %ecx,(%edi) + jmp L020djend +L021dj3: + rorl $16,%ecx + movb %cl,2(%edi) + shll $16,%ecx +L022dj2: + movb %ch,1(%esi) +L023dj1: + movb %cl,(%esi) +L020djend: + jmp L003finish +L003finish: + movl 60(%esp),%ecx + addl $24,%esp + movl %eax,(%ecx) + movl %ebx,4(%ecx) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 6,0x90 +L005cbc_enc_jmp_table: +.long 0 +.long L013ej1-L004PIC_point +.long L012ej2-L004PIC_point +.long L011ej3-L004PIC_point +.long L009ej4-L004PIC_point +.long L008ej5-L004PIC_point +.long L007ej6-L004PIC_point +.long 
L006ej7-L004PIC_point +.align 6,0x90 diff --git a/deps/openssl/config/archs/BSD-x86/asm/crypto/buildinf.h b/deps/openssl/config/archs/BSD-x86/asm/crypto/buildinf.h index cc24d30a9b9b75..c1217da5225db5 100644 --- a/deps/openssl/config/archs/BSD-x86/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/BSD-x86/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: BSD-x86" -#define DATE "built on: Wed Mar 18 21:04:48 2020 UTC" +#define DATE "built on: Tue Mar 31 13:06:25 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/BSD-x86/asm/crypto/des/des-586.s b/deps/openssl/config/archs/BSD-x86/asm/crypto/des/des-586.s new file mode 100644 index 00000000000000..5ddd0ed7311ec1 --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm/crypto/des/des-586.s @@ -0,0 +1,1829 @@ +.text +.globl _DES_SPtrans +.type __x86_DES_encrypt,@function +.align 4 +__x86_DES_encrypt: + pushl %ecx + # Round 0 + movl (%ecx),%eax + xorl %ebx,%ebx + movl 4(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 1 + movl 8(%ecx),%eax + xorl %ebx,%ebx + movl 12(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 2 + movl 16(%ecx),%eax + xorl %ebx,%ebx + movl 20(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 3 + movl 24(%ecx),%eax + xorl %ebx,%ebx + movl 28(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 4 + movl 32(%ecx),%eax + xorl %ebx,%ebx + movl 36(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl 
$0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 5 + movl 40(%ecx),%eax + xorl %ebx,%ebx + movl 44(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 6 + movl 48(%ecx),%eax + xorl %ebx,%ebx + movl 52(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 7 + movl 56(%ecx),%eax + xorl %ebx,%ebx + movl 60(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 8 + movl 64(%ecx),%eax + xorl %ebx,%ebx + movl 68(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 9 + movl 72(%ecx),%eax + xorl %ebx,%ebx + movl 76(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 10 + movl 80(%ecx),%eax + xorl %ebx,%ebx + movl 84(%ecx),%edx + 
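Each "# Round N" block in these DES helpers is one round of the table-driven Feistel network: the round's two subkey words (bytes 8n and 8n+4 of the schedule in %ecx) are XORed into the active half, the 0xfcfcfcfc/0xcfcfcfcf masks split the result into inputs for the even and odd lookup tables (the eight 0x100-byte regions of _DES_SPtrans at displacements 0x000-0x700 from %ebp), and the eight loads are XORed into the other half. A C sketch of one round, assuming the standard DES_SPtrans layout; this is an illustration, not OpenSSL's exact C code:

    #include <stdint.h>

    #define ROTR32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))

    /* Because of the masks, every selected byte is a multiple of 4,
       i.e. directly a byte offset into its 64-word table region --
       which is why the asm can index with plain (%ebp,%ebx,1). */
    static uint32_t des_round_sketch(const uint32_t SP[8][64],
                                     uint32_t k0, uint32_t k1,
                                     uint32_t L, uint32_t R)
    {
        uint32_t u = (R ^ k0) & 0xfcfcfcfc;             /* even S-boxes */
        uint32_t v = ROTR32((R ^ k1) & 0xcfcfcfcf, 4);  /* odd S-boxes  */

        L ^= SP[0][(u >>  2) & 0x3f] ^ SP[2][(u >> 10) & 0x3f]
           ^ SP[4][(u >> 18) & 0x3f] ^ SP[6][(u >> 26) & 0x3f]
           ^ SP[1][(v >>  2) & 0x3f] ^ SP[3][(v >> 10) & 0x3f]
           ^ SP[5][(v >> 18) & 0x3f] ^ SP[7][(v >> 26) & 0x3f];
        return L;
    }

__x86_DES_decrypt below is the identical round body with the subkey offsets walked from 120 down to 0, i.e. the schedule applied in reverse.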
xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 11 + movl 88(%ecx),%eax + xorl %ebx,%ebx + movl 92(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 12 + movl 96(%ecx),%eax + xorl %ebx,%ebx + movl 100(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 13 + movl 104(%ecx),%eax + xorl %ebx,%ebx + movl 108(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 14 + movl 112(%ecx),%eax + xorl %ebx,%ebx + movl 116(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 15 + movl 120(%ecx),%eax + xorl %ebx,%ebx + movl 124(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 
0x500(%ebp,%edx,1),%esi + addl $4,%esp + ret +.type __x86_DES_decrypt,@function +.align 4 +__x86_DES_decrypt: + pushl %ecx + # Round 15 + movl 120(%ecx),%eax + xorl %ebx,%ebx + movl 124(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 14 + movl 112(%ecx),%eax + xorl %ebx,%ebx + movl 116(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 13 + movl 104(%ecx),%eax + xorl %ebx,%ebx + movl 108(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 12 + movl 96(%ecx),%eax + xorl %ebx,%ebx + movl 100(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 11 + movl 88(%ecx),%eax + xorl %ebx,%ebx + movl 92(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 10 + movl 80(%ecx),%eax + xorl %ebx,%ebx + movl 84(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 
0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 9 + movl 72(%ecx),%eax + xorl %ebx,%ebx + movl 76(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 8 + movl 64(%ecx),%eax + xorl %ebx,%ebx + movl 68(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 7 + movl 56(%ecx),%eax + xorl %ebx,%ebx + movl 60(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 6 + movl 48(%ecx),%eax + xorl %ebx,%ebx + movl 52(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 5 + movl 40(%ecx),%eax + xorl %ebx,%ebx + movl 44(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 4 + movl 32(%ecx),%eax + xorl %ebx,%ebx + movl 36(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl 
$16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 3 + movl 24(%ecx),%eax + xorl %ebx,%ebx + movl 28(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 2 + movl 16(%ecx),%eax + xorl %ebx,%ebx + movl 20(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + # Round 1 + movl 8(%ecx),%eax + xorl %ebx,%ebx + movl 12(%ecx),%edx + xorl %esi,%eax + xorl %ecx,%ecx + xorl %esi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%edi + xorl 0x700(%ebp,%ecx,1),%edi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%edi + xorl 0x500(%ebp,%edx,1),%edi + # Round 0 + movl (%ecx),%eax + xorl %ebx,%ebx + movl 4(%ecx),%edx + xorl %edi,%eax + xorl %ecx,%ecx + xorl %edi,%edx + andl $0xfcfcfcfc,%eax + andl $0xcfcfcfcf,%edx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + xorl 0x600(%ebp,%ebx,1),%esi + xorl 0x700(%ebp,%ecx,1),%esi + movl (%esp),%ecx + xorl 0x400(%ebp,%eax,1),%esi + xorl 0x500(%ebp,%edx,1),%esi + addl $4,%esp + ret +.globl _DES_encrypt1 +.type _DES_encrypt1,@function +.align 4 +_DES_encrypt1: +L_DES_encrypt1_begin: + pushl %esi + pushl %edi + + # Load the 2 words + movl 12(%esp),%esi + xorl %ecx,%ecx + pushl %ebx + pushl %ebp + movl (%esi),%eax + movl 28(%esp),%ebx + movl 4(%esi),%edi + + # IP + roll $4,%eax + movl %eax,%esi + xorl %edi,%eax + andl $0xf0f0f0f0,%eax + xorl %eax,%esi + xorl %eax,%edi + + roll $20,%edi + movl %edi,%eax + xorl %esi,%edi + andl $0xfff0000f,%edi + xorl %edi,%eax + xorl %edi,%esi + + roll $14,%eax + movl %eax,%edi + xorl %esi,%eax + andl $0x33333333,%eax + xorl %eax,%edi + xorl %eax,%esi + + roll $22,%esi + movl %esi,%eax + xorl %edi,%esi + andl $0x03fc03fc,%esi + xorl %esi,%eax + xorl %esi,%edi + + roll $9,%eax + movl %eax,%esi + xorl %edi,%eax + andl $0xaaaaaaaa,%eax + xorl %eax,%esi + xorl 
%eax,%edi + + roll $1,%edi + call L000pic_point +L000pic_point: + popl %ebp + leal Ldes_sptrans-L000pic_point(%ebp),%ebp + movl 24(%esp),%ecx + cmpl $0,%ebx + je L001decrypt + call __x86_DES_encrypt + jmp L002done +L001decrypt: + call __x86_DES_decrypt +L002done: + + # FP + movl 20(%esp),%edx + rorl $1,%esi + movl %edi,%eax + xorl %esi,%edi + andl $0xaaaaaaaa,%edi + xorl %edi,%eax + xorl %edi,%esi + + roll $23,%eax + movl %eax,%edi + xorl %esi,%eax + andl $0x03fc03fc,%eax + xorl %eax,%edi + xorl %eax,%esi + + roll $10,%edi + movl %edi,%eax + xorl %esi,%edi + andl $0x33333333,%edi + xorl %edi,%eax + xorl %edi,%esi + + roll $18,%esi + movl %esi,%edi + xorl %eax,%esi + andl $0xfff0000f,%esi + xorl %esi,%edi + xorl %esi,%eax + + roll $12,%edi + movl %edi,%esi + xorl %eax,%edi + andl $0xf0f0f0f0,%edi + xorl %edi,%esi + xorl %edi,%eax + + rorl $4,%eax + movl %eax,(%edx) + movl %esi,4(%edx) + popl %ebp + popl %ebx + popl %edi + popl %esi + ret +.globl _DES_encrypt2 +.type _DES_encrypt2,@function +.align 4 +_DES_encrypt2: +L_DES_encrypt2_begin: + pushl %esi + pushl %edi + + # Load the 2 words + movl 12(%esp),%eax + xorl %ecx,%ecx + pushl %ebx + pushl %ebp + movl (%eax),%esi + movl 28(%esp),%ebx + roll $3,%esi + movl 4(%eax),%edi + roll $3,%edi + call L003pic_point +L003pic_point: + popl %ebp + leal Ldes_sptrans-L003pic_point(%ebp),%ebp + movl 24(%esp),%ecx + cmpl $0,%ebx + je L004decrypt + call __x86_DES_encrypt + jmp L005done +L004decrypt: + call __x86_DES_decrypt +L005done: + + # Fixup + rorl $3,%edi + movl 20(%esp),%eax + rorl $3,%esi + movl %edi,(%eax) + movl %esi,4(%eax) + popl %ebp + popl %ebx + popl %edi + popl %esi + ret +.globl _DES_encrypt3 +.type _DES_encrypt3,@function +.align 4 +_DES_encrypt3: +L_DES_encrypt3_begin: + pushl %ebx + movl 8(%esp),%ebx + pushl %ebp + pushl %esi + pushl %edi + + # Load the data words + movl (%ebx),%edi + movl 4(%ebx),%esi + subl $12,%esp + + # IP + roll $4,%edi + movl %edi,%edx + xorl %esi,%edi + andl $0xf0f0f0f0,%edi + xorl %edi,%edx + xorl %edi,%esi + + roll $20,%esi + movl %esi,%edi + xorl %edx,%esi + andl $0xfff0000f,%esi + xorl %esi,%edi + xorl %esi,%edx + + roll $14,%edi + movl %edi,%esi + xorl %edx,%edi + andl $0x33333333,%edi + xorl %edi,%esi + xorl %edi,%edx + + roll $22,%edx + movl %edx,%edi + xorl %esi,%edx + andl $0x03fc03fc,%edx + xorl %edx,%edi + xorl %edx,%esi + + roll $9,%edi + movl %edi,%edx + xorl %esi,%edi + andl $0xaaaaaaaa,%edi + xorl %edi,%edx + xorl %edi,%esi + + rorl $3,%edx + rorl $2,%esi + movl %esi,4(%ebx) + movl 36(%esp),%eax + movl %edx,(%ebx) + movl 40(%esp),%edi + movl 44(%esp),%esi + movl $1,8(%esp) + movl %eax,4(%esp) + movl %ebx,(%esp) + call L_DES_encrypt2_begin + movl $0,8(%esp) + movl %edi,4(%esp) + movl %ebx,(%esp) + call L_DES_encrypt2_begin + movl $1,8(%esp) + movl %esi,4(%esp) + movl %ebx,(%esp) + call L_DES_encrypt2_begin + addl $12,%esp + movl (%ebx),%edi + movl 4(%ebx),%esi + + # FP + roll $2,%esi + roll $3,%edi + movl %edi,%eax + xorl %esi,%edi + andl $0xaaaaaaaa,%edi + xorl %edi,%eax + xorl %edi,%esi + + roll $23,%eax + movl %eax,%edi + xorl %esi,%eax + andl $0x03fc03fc,%eax + xorl %eax,%edi + xorl %eax,%esi + + roll $10,%edi + movl %edi,%eax + xorl %esi,%edi + andl $0x33333333,%edi + xorl %edi,%eax + xorl %edi,%esi + + roll $18,%esi + movl %esi,%edi + xorl %eax,%esi + andl $0xfff0000f,%esi + xorl %esi,%edi + xorl %esi,%eax + + roll $12,%edi + movl %edi,%esi + xorl %eax,%edi + andl $0xf0f0f0f0,%edi + xorl %edi,%esi + xorl %edi,%eax + + rorl $4,%eax + movl %eax,(%ebx) + movl %esi,4(%ebx) + popl %edi + popl %esi 
+ popl %ebp + popl %ebx + ret +.globl _DES_decrypt3 +.type _DES_decrypt3,@function +.align 4 +_DES_decrypt3: +L_DES_decrypt3_begin: + pushl %ebx + movl 8(%esp),%ebx + pushl %ebp + pushl %esi + pushl %edi + + # Load the data words + movl (%ebx),%edi + movl 4(%ebx),%esi + subl $12,%esp + + # IP + roll $4,%edi + movl %edi,%edx + xorl %esi,%edi + andl $0xf0f0f0f0,%edi + xorl %edi,%edx + xorl %edi,%esi + + roll $20,%esi + movl %esi,%edi + xorl %edx,%esi + andl $0xfff0000f,%esi + xorl %esi,%edi + xorl %esi,%edx + + roll $14,%edi + movl %edi,%esi + xorl %edx,%edi + andl $0x33333333,%edi + xorl %edi,%esi + xorl %edi,%edx + + roll $22,%edx + movl %edx,%edi + xorl %esi,%edx + andl $0x03fc03fc,%edx + xorl %edx,%edi + xorl %edx,%esi + + roll $9,%edi + movl %edi,%edx + xorl %esi,%edi + andl $0xaaaaaaaa,%edi + xorl %edi,%edx + xorl %edi,%esi + + rorl $3,%edx + rorl $2,%esi + movl %esi,4(%ebx) + movl 36(%esp),%esi + movl %edx,(%ebx) + movl 40(%esp),%edi + movl 44(%esp),%eax + movl $0,8(%esp) + movl %eax,4(%esp) + movl %ebx,(%esp) + call L_DES_encrypt2_begin + movl $1,8(%esp) + movl %edi,4(%esp) + movl %ebx,(%esp) + call L_DES_encrypt2_begin + movl $0,8(%esp) + movl %esi,4(%esp) + movl %ebx,(%esp) + call L_DES_encrypt2_begin + addl $12,%esp + movl (%ebx),%edi + movl 4(%ebx),%esi + + # FP + roll $2,%esi + roll $3,%edi + movl %edi,%eax + xorl %esi,%edi + andl $0xaaaaaaaa,%edi + xorl %edi,%eax + xorl %edi,%esi + + roll $23,%eax + movl %eax,%edi + xorl %esi,%eax + andl $0x03fc03fc,%eax + xorl %eax,%edi + xorl %eax,%esi + + roll $10,%edi + movl %edi,%eax + xorl %esi,%edi + andl $0x33333333,%edi + xorl %edi,%eax + xorl %edi,%esi + + roll $18,%esi + movl %esi,%edi + xorl %eax,%esi + andl $0xfff0000f,%esi + xorl %esi,%edi + xorl %esi,%eax + + roll $12,%edi + movl %edi,%esi + xorl %eax,%edi + andl $0xf0f0f0f0,%edi + xorl %edi,%esi + xorl %edi,%eax + + rorl $4,%eax + movl %eax,(%ebx) + movl %esi,4(%ebx) + popl %edi + popl %esi + popl %ebp + popl %ebx + ret +.globl _DES_ncbc_encrypt +.type _DES_ncbc_encrypt,@function +.align 4 +_DES_ncbc_encrypt: +L_DES_ncbc_encrypt_begin: + + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + movl 28(%esp),%ebp + # getting iv ptr from parameter 4 + movl 36(%esp),%ebx + movl (%ebx),%esi + movl 4(%ebx),%edi + pushl %edi + pushl %esi + pushl %edi + pushl %esi + movl %esp,%ebx + movl 36(%esp),%esi + movl 40(%esp),%edi + # getting encrypt flag from parameter 5 + movl 56(%esp),%ecx + # get and push parameter 5 + pushl %ecx + # get and push parameter 3 + movl 52(%esp),%eax + pushl %eax + pushl %ebx + cmpl $0,%ecx + jz L006decrypt + andl $4294967288,%ebp + movl 12(%esp),%eax + movl 16(%esp),%ebx + jz L007encrypt_finish +L008encrypt_loop: + movl (%esi),%ecx + movl 4(%esi),%edx + xorl %ecx,%eax + xorl %edx,%ebx + movl %eax,12(%esp) + movl %ebx,16(%esp) + call L_DES_encrypt1_begin + movl 12(%esp),%eax + movl 16(%esp),%ebx + movl %eax,(%edi) + movl %ebx,4(%edi) + addl $8,%esi + addl $8,%edi + subl $8,%ebp + jnz L008encrypt_loop +L007encrypt_finish: + movl 56(%esp),%ebp + andl $7,%ebp + jz L009finish + call L010PIC_point +L010PIC_point: + popl %edx + leal L011cbc_enc_jmp_table-L010PIC_point(%edx),%ecx + movl (%ecx,%ebp,4),%ebp + addl %edx,%ebp + xorl %ecx,%ecx + xorl %edx,%edx + jmp *%ebp +L012ej7: + movb 6(%esi),%dh + shll $8,%edx +L013ej6: + movb 5(%esi),%dh +L014ej5: + movb 4(%esi),%dl +L015ej4: + movl (%esi),%ecx + jmp L016ejend +L017ej3: + movb 2(%esi),%ch + shll $8,%ecx +L018ej2: + movb 1(%esi),%ch +L019ej1: + movb (%esi),%cl +L016ejend: + xorl %ecx,%eax + xorl %edx,%ebx + movl 
%eax,12(%esp) + movl %ebx,16(%esp) + call L_DES_encrypt1_begin + movl 12(%esp),%eax + movl 16(%esp),%ebx + movl %eax,(%edi) + movl %ebx,4(%edi) + jmp L009finish +L006decrypt: + andl $4294967288,%ebp + movl 20(%esp),%eax + movl 24(%esp),%ebx + jz L020decrypt_finish +L021decrypt_loop: + movl (%esi),%eax + movl 4(%esi),%ebx + movl %eax,12(%esp) + movl %ebx,16(%esp) + call L_DES_encrypt1_begin + movl 12(%esp),%eax + movl 16(%esp),%ebx + movl 20(%esp),%ecx + movl 24(%esp),%edx + xorl %eax,%ecx + xorl %ebx,%edx + movl (%esi),%eax + movl 4(%esi),%ebx + movl %ecx,(%edi) + movl %edx,4(%edi) + movl %eax,20(%esp) + movl %ebx,24(%esp) + addl $8,%esi + addl $8,%edi + subl $8,%ebp + jnz L021decrypt_loop +L020decrypt_finish: + movl 56(%esp),%ebp + andl $7,%ebp + jz L009finish + movl (%esi),%eax + movl 4(%esi),%ebx + movl %eax,12(%esp) + movl %ebx,16(%esp) + call L_DES_encrypt1_begin + movl 12(%esp),%eax + movl 16(%esp),%ebx + movl 20(%esp),%ecx + movl 24(%esp),%edx + xorl %eax,%ecx + xorl %ebx,%edx + movl (%esi),%eax + movl 4(%esi),%ebx +L022dj7: + rorl $16,%edx + movb %dl,6(%edi) + shrl $16,%edx +L023dj6: + movb %dh,5(%edi) +L024dj5: + movb %dl,4(%edi) +L025dj4: + movl %ecx,(%edi) + jmp L026djend +L027dj3: + rorl $16,%ecx + movb %cl,2(%edi) + shll $16,%ecx +L028dj2: + movb %ch,1(%esi) +L029dj1: + movb %cl,(%esi) +L026djend: + jmp L009finish +L009finish: + movl 64(%esp),%ecx + addl $28,%esp + movl %eax,(%ecx) + movl %ebx,4(%ecx) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 6,0x90 +L011cbc_enc_jmp_table: +.long 0 +.long L019ej1-L010PIC_point +.long L018ej2-L010PIC_point +.long L017ej3-L010PIC_point +.long L015ej4-L010PIC_point +.long L014ej5-L010PIC_point +.long L013ej6-L010PIC_point +.long L012ej7-L010PIC_point +.align 6,0x90 +.globl _DES_ede3_cbc_encrypt +.type _DES_ede3_cbc_encrypt,@function +.align 4 +_DES_ede3_cbc_encrypt: +L_DES_ede3_cbc_encrypt_begin: + + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + movl 28(%esp),%ebp + # getting iv ptr from parameter 6 + movl 44(%esp),%ebx + movl (%ebx),%esi + movl 4(%ebx),%edi + pushl %edi + pushl %esi + pushl %edi + pushl %esi + movl %esp,%ebx + movl 36(%esp),%esi + movl 40(%esp),%edi + # getting encrypt flag from parameter 7 + movl 64(%esp),%ecx + # get and push parameter 5 + movl 56(%esp),%eax + pushl %eax + # get and push parameter 4 + movl 56(%esp),%eax + pushl %eax + # get and push parameter 3 + movl 56(%esp),%eax + pushl %eax + pushl %ebx + cmpl $0,%ecx + jz L030decrypt + andl $4294967288,%ebp + movl 16(%esp),%eax + movl 20(%esp),%ebx + jz L031encrypt_finish +L032encrypt_loop: + movl (%esi),%ecx + movl 4(%esi),%edx + xorl %ecx,%eax + xorl %edx,%ebx + movl %eax,16(%esp) + movl %ebx,20(%esp) + call L_DES_encrypt3_begin + movl 16(%esp),%eax + movl 20(%esp),%ebx + movl %eax,(%edi) + movl %ebx,4(%edi) + addl $8,%esi + addl $8,%edi + subl $8,%ebp + jnz L032encrypt_loop +L031encrypt_finish: + movl 60(%esp),%ebp + andl $7,%ebp + jz L033finish + call L034PIC_point +L034PIC_point: + popl %edx + leal L035cbc_enc_jmp_table-L034PIC_point(%edx),%ecx + movl (%ecx,%ebp,4),%ebp + addl %edx,%ebp + xorl %ecx,%ecx + xorl %edx,%edx + jmp *%ebp +L036ej7: + movb 6(%esi),%dh + shll $8,%edx +L037ej6: + movb 5(%esi),%dh +L038ej5: + movb 4(%esi),%dl +L039ej4: + movl (%esi),%ecx + jmp L040ejend +L041ej3: + movb 2(%esi),%ch + shll $8,%ecx +L042ej2: + movb 1(%esi),%ch +L043ej1: + movb (%esi),%cl +L040ejend: + xorl %ecx,%eax + xorl %edx,%ebx + movl %eax,16(%esp) + movl %ebx,20(%esp) + call L_DES_encrypt3_begin + movl 16(%esp),%eax + movl 20(%esp),%ebx + movl 
%eax,(%edi) + movl %ebx,4(%edi) + jmp L033finish +L030decrypt: + andl $4294967288,%ebp + movl 24(%esp),%eax + movl 28(%esp),%ebx + jz L044decrypt_finish +L045decrypt_loop: + movl (%esi),%eax + movl 4(%esi),%ebx + movl %eax,16(%esp) + movl %ebx,20(%esp) + call L_DES_decrypt3_begin + movl 16(%esp),%eax + movl 20(%esp),%ebx + movl 24(%esp),%ecx + movl 28(%esp),%edx + xorl %eax,%ecx + xorl %ebx,%edx + movl (%esi),%eax + movl 4(%esi),%ebx + movl %ecx,(%edi) + movl %edx,4(%edi) + movl %eax,24(%esp) + movl %ebx,28(%esp) + addl $8,%esi + addl $8,%edi + subl $8,%ebp + jnz L045decrypt_loop +L044decrypt_finish: + movl 60(%esp),%ebp + andl $7,%ebp + jz L033finish + movl (%esi),%eax + movl 4(%esi),%ebx + movl %eax,16(%esp) + movl %ebx,20(%esp) + call L_DES_decrypt3_begin + movl 16(%esp),%eax + movl 20(%esp),%ebx + movl 24(%esp),%ecx + movl 28(%esp),%edx + xorl %eax,%ecx + xorl %ebx,%edx + movl (%esi),%eax + movl 4(%esi),%ebx +L046dj7: + rorl $16,%edx + movb %dl,6(%edi) + shrl $16,%edx +L047dj6: + movb %dh,5(%edi) +L048dj5: + movb %dl,4(%edi) +L049dj4: + movl %ecx,(%edi) + jmp L050djend +L051dj3: + rorl $16,%ecx + movb %cl,2(%edi) + shll $16,%ecx +L052dj2: + movb %ch,1(%esi) +L053dj1: + movb %cl,(%esi) +L050djend: + jmp L033finish +L033finish: + movl 76(%esp),%ecx + addl $32,%esp + movl %eax,(%ecx) + movl %ebx,4(%ecx) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 6,0x90 +L035cbc_enc_jmp_table: +.long 0 +.long L043ej1-L034PIC_point +.long L042ej2-L034PIC_point +.long L041ej3-L034PIC_point +.long L039ej4-L034PIC_point +.long L038ej5-L034PIC_point +.long L037ej6-L034PIC_point +.long L036ej7-L034PIC_point +.align 6,0x90 +.align 6,0x90 +_DES_SPtrans: +Ldes_sptrans: +.long 34080768,524288,33554434,34080770 +.long 33554432,526338,524290,33554434 +.long 526338,34080768,34078720,2050 +.long 33556482,33554432,0,524290 +.long 524288,2,33556480,526336 +.long 34080770,34078720,2050,33556480 +.long 2,2048,526336,34078722 +.long 2048,33556482,34078722,0 +.long 0,34080770,33556480,524290 +.long 34080768,524288,2050,33556480 +.long 34078722,2048,526336,33554434 +.long 526338,2,33554434,34078720 +.long 34080770,526336,34078720,33556482 +.long 33554432,2050,524290,0 +.long 524288,33554432,33556482,34080768 +.long 2,34078722,2048,526338 +.long 1074823184,0,1081344,1074790400 +.long 1073741840,32784,1073774592,1081344 +.long 32768,1074790416,16,1073774592 +.long 1048592,1074823168,1074790400,16 +.long 1048576,1073774608,1074790416,32768 +.long 1081360,1073741824,0,1048592 +.long 1073774608,1081360,1074823168,1073741840 +.long 1073741824,1048576,32784,1074823184 +.long 1048592,1074823168,1073774592,1081360 +.long 1074823184,1048592,1073741840,0 +.long 1073741824,32784,1048576,1074790416 +.long 32768,1073741824,1081360,1073774608 +.long 1074823168,32768,0,1073741840 +.long 16,1074823184,1081344,1074790400 +.long 1074790416,1048576,32784,1073774592 +.long 1073774608,16,1074790400,1081344 +.long 67108865,67371264,256,67109121 +.long 262145,67108864,67109121,262400 +.long 67109120,262144,67371008,1 +.long 67371265,257,1,67371009 +.long 0,262145,67371264,256 +.long 257,67371265,262144,67108865 +.long 67371009,67109120,262401,67371008 +.long 262400,0,67108864,262401 +.long 67371264,256,1,262144 +.long 257,262145,67371008,67109121 +.long 0,67371264,262400,67371009 +.long 262145,67108864,67371265,1 +.long 262401,67108865,67108864,67371265 +.long 262144,67109120,67109121,262400 +.long 67109120,0,67371009,257 +.long 67108865,262401,256,67371008 +.long 4198408,268439552,8,272633864 +.long 0,272629760,268439560,4194312 
+.long 272633856,268435464,268435456,4104 +.long 268435464,4198408,4194304,268435456 +.long 272629768,4198400,4096,8 +.long 4198400,268439560,272629760,4096 +.long 4104,0,4194312,272633856 +.long 268439552,272629768,272633864,4194304 +.long 272629768,4104,4194304,268435464 +.long 4198400,268439552,8,272629760 +.long 268439560,0,4096,4194312 +.long 0,272629768,272633856,4096 +.long 268435456,272633864,4198408,4194304 +.long 272633864,8,268439552,4198408 +.long 4194312,4198400,272629760,268439560 +.long 4104,268435456,268435464,272633856 +.long 134217728,65536,1024,134284320 +.long 134283296,134218752,66592,134283264 +.long 65536,32,134217760,66560 +.long 134218784,134283296,134284288,0 +.long 66560,134217728,65568,1056 +.long 134218752,66592,0,134217760 +.long 32,134218784,134284320,65568 +.long 134283264,1024,1056,134284288 +.long 134284288,134218784,65568,134283264 +.long 65536,32,134217760,134218752 +.long 134217728,66560,134284320,0 +.long 66592,134217728,1024,65568 +.long 134218784,1024,0,134284320 +.long 134283296,134284288,1056,65536 +.long 66560,134283296,134218752,1056 +.long 32,66592,134283264,134217760 +.long 2147483712,2097216,0,2149588992 +.long 2097216,8192,2147491904,2097152 +.long 8256,2149589056,2105344,2147483648 +.long 2147491840,2147483712,2149580800,2105408 +.long 2097152,2147491904,2149580864,0 +.long 8192,64,2149588992,2149580864 +.long 2149589056,2149580800,2147483648,8256 +.long 64,2105344,2105408,2147491840 +.long 8256,2147483648,2147491840,2105408 +.long 2149588992,2097216,0,2147491840 +.long 2147483648,8192,2149580864,2097152 +.long 2097216,2149589056,2105344,64 +.long 2149589056,2105344,2097152,2147491904 +.long 2147483712,2149580800,2105408,0 +.long 8192,2147483712,2147491904,2149588992 +.long 2149580800,8256,64,2149580864 +.long 16384,512,16777728,16777220 +.long 16794116,16388,16896,0 +.long 16777216,16777732,516,16793600 +.long 4,16794112,16793600,516 +.long 16777732,16384,16388,16794116 +.long 0,16777728,16777220,16896 +.long 16793604,16900,16794112,4 +.long 16900,16793604,512,16777216 +.long 16900,16793600,16793604,516 +.long 16384,512,16777216,16793604 +.long 16777732,16900,16896,0 +.long 512,16777220,4,16777728 +.long 0,16777732,16777728,16896 +.long 516,16384,16794116,16777216 +.long 16794112,4,16388,16794116 +.long 16777220,16794112,16793600,16388 +.long 545259648,545390592,131200,0 +.long 537001984,8388736,545259520,545390720 +.long 128,536870912,8519680,131200 +.long 8519808,537002112,536871040,545259520 +.long 131072,8519808,8388736,537001984 +.long 545390720,536871040,0,8519680 +.long 536870912,8388608,537002112,545259648 +.long 8388608,131072,545390592,128 +.long 8388608,131072,536871040,545390720 +.long 131200,536870912,0,8519680 +.long 545259648,537002112,537001984,8388736 +.long 545390592,128,8388736,537001984 +.long 545390720,8388608,545259520,536871040 +.long 8519680,131200,537002112,545259520 +.long 128,545390592,8519808,0 +.long 536870912,545259648,131072,8519808 diff --git a/deps/openssl/config/archs/BSD-x86/asm/crypto/ripemd/rmd-586.s b/deps/openssl/config/archs/BSD-x86/asm/crypto/ripemd/rmd-586.s new file mode 100644 index 00000000000000..17603e38536262 --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm/crypto/ripemd/rmd-586.s @@ -0,0 +1,1963 @@ +.text +.globl _ripemd160_block_asm_data_order +.type _ripemd160_block_asm_data_order,@function +.align 4 +_ripemd160_block_asm_data_order: +L_ripemd160_block_asm_data_order_begin: + movl 4(%esp),%edx + movl 8(%esp),%eax + pushl %esi + movl (%edx),%ecx + pushl %edi + movl 4(%edx),%esi + 
pushl %ebp + movl 8(%edx),%edi + pushl %ebx + subl $108,%esp +L000start: + + movl (%eax),%ebx + movl 4(%eax),%ebp + movl %ebx,(%esp) + movl %ebp,4(%esp) + movl 8(%eax),%ebx + movl 12(%eax),%ebp + movl %ebx,8(%esp) + movl %ebp,12(%esp) + movl 16(%eax),%ebx + movl 20(%eax),%ebp + movl %ebx,16(%esp) + movl %ebp,20(%esp) + movl 24(%eax),%ebx + movl 28(%eax),%ebp + movl %ebx,24(%esp) + movl %ebp,28(%esp) + movl 32(%eax),%ebx + movl 36(%eax),%ebp + movl %ebx,32(%esp) + movl %ebp,36(%esp) + movl 40(%eax),%ebx + movl 44(%eax),%ebp + movl %ebx,40(%esp) + movl %ebp,44(%esp) + movl 48(%eax),%ebx + movl 52(%eax),%ebp + movl %ebx,48(%esp) + movl %ebp,52(%esp) + movl 56(%eax),%ebx + movl 60(%eax),%ebp + movl %ebx,56(%esp) + movl %ebp,60(%esp) + movl %edi,%eax + movl 12(%edx),%ebx + movl 16(%edx),%ebp + # 0 + xorl %ebx,%eax + movl (%esp),%edx + xorl %esi,%eax + addl %edx,%ecx + roll $10,%edi + addl %eax,%ecx + movl %esi,%eax + roll $11,%ecx + addl %ebp,%ecx + # 1 + xorl %edi,%eax + movl 4(%esp),%edx + xorl %ecx,%eax + addl %eax,%ebp + movl %ecx,%eax + roll $10,%esi + addl %edx,%ebp + xorl %esi,%eax + roll $14,%ebp + addl %ebx,%ebp + # 2 + movl 8(%esp),%edx + xorl %ebp,%eax + addl %edx,%ebx + roll $10,%ecx + addl %eax,%ebx + movl %ebp,%eax + roll $15,%ebx + addl %edi,%ebx + # 3 + xorl %ecx,%eax + movl 12(%esp),%edx + xorl %ebx,%eax + addl %eax,%edi + movl %ebx,%eax + roll $10,%ebp + addl %edx,%edi + xorl %ebp,%eax + roll $12,%edi + addl %esi,%edi + # 4 + movl 16(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $5,%esi + addl %ecx,%esi + # 5 + xorl %ebx,%eax + movl 20(%esp),%edx + xorl %esi,%eax + addl %eax,%ecx + movl %esi,%eax + roll $10,%edi + addl %edx,%ecx + xorl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 6 + movl 24(%esp),%edx + xorl %ecx,%eax + addl %edx,%ebp + roll $10,%esi + addl %eax,%ebp + movl %ecx,%eax + roll $7,%ebp + addl %ebx,%ebp + # 7 + xorl %esi,%eax + movl 28(%esp),%edx + xorl %ebp,%eax + addl %eax,%ebx + movl %ebp,%eax + roll $10,%ecx + addl %edx,%ebx + xorl %ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 8 + movl 32(%esp),%edx + xorl %ebx,%eax + addl %edx,%edi + roll $10,%ebp + addl %eax,%edi + movl %ebx,%eax + roll $11,%edi + addl %esi,%edi + # 9 + xorl %ebp,%eax + movl 36(%esp),%edx + xorl %edi,%eax + addl %eax,%esi + movl %edi,%eax + roll $10,%ebx + addl %edx,%esi + xorl %ebx,%eax + roll $13,%esi + addl %ecx,%esi + # 10 + movl 40(%esp),%edx + xorl %esi,%eax + addl %edx,%ecx + roll $10,%edi + addl %eax,%ecx + movl %esi,%eax + roll $14,%ecx + addl %ebp,%ecx + # 11 + xorl %edi,%eax + movl 44(%esp),%edx + xorl %ecx,%eax + addl %eax,%ebp + movl %ecx,%eax + roll $10,%esi + addl %edx,%ebp + xorl %esi,%eax + roll $15,%ebp + addl %ebx,%ebp + # 12 + movl 48(%esp),%edx + xorl %ebp,%eax + addl %edx,%ebx + roll $10,%ecx + addl %eax,%ebx + movl %ebp,%eax + roll $6,%ebx + addl %edi,%ebx + # 13 + xorl %ecx,%eax + movl 52(%esp),%edx + xorl %ebx,%eax + addl %eax,%edi + movl %ebx,%eax + roll $10,%ebp + addl %edx,%edi + xorl %ebp,%eax + roll $7,%edi + addl %esi,%edi + # 14 + movl 56(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $9,%esi + addl %ecx,%esi + # 15 + xorl %ebx,%eax + movl 60(%esp),%edx + xorl %esi,%eax + addl %eax,%ecx + movl $-1,%eax + roll $10,%edi + addl %edx,%ecx + movl 28(%esp),%edx + roll $8,%ecx + addl %ebp,%ecx + # 16 + addl %edx,%ebp + movl %esi,%edx + subl %ecx,%eax + andl %ecx,%edx + andl %edi,%eax + orl %eax,%edx + movl 16(%esp),%eax + roll $10,%esi + leal 
1518500249(%ebp,%edx,1),%ebp + movl $-1,%edx + roll $7,%ebp + addl %ebx,%ebp + # 17 + addl %eax,%ebx + movl %ecx,%eax + subl %ebp,%edx + andl %ebp,%eax + andl %esi,%edx + orl %edx,%eax + movl 52(%esp),%edx + roll $10,%ecx + leal 1518500249(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $6,%ebx + addl %edi,%ebx + # 18 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%ebp + leal 1518500249(%edi,%edx,1),%edi + movl $-1,%edx + roll $8,%edi + addl %esi,%edi + # 19 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 40(%esp),%edx + roll $10,%ebx + leal 1518500249(%esi,%eax,1),%esi + movl $-1,%eax + roll $13,%esi + addl %ecx,%esi + # 20 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%edi + leal 1518500249(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $11,%ecx + addl %ebp,%ecx + # 21 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl 60(%esp),%edx + roll $10,%esi + leal 1518500249(%ebp,%eax,1),%ebp + movl $-1,%eax + roll $9,%ebp + addl %ebx,%ebp + # 22 + addl %edx,%ebx + movl %ecx,%edx + subl %ebp,%eax + andl %ebp,%edx + andl %esi,%eax + orl %eax,%edx + movl 12(%esp),%eax + roll $10,%ecx + leal 1518500249(%ebx,%edx,1),%ebx + movl $-1,%edx + roll $7,%ebx + addl %edi,%ebx + # 23 + addl %eax,%edi + movl %ebp,%eax + subl %ebx,%edx + andl %ebx,%eax + andl %ecx,%edx + orl %edx,%eax + movl 48(%esp),%edx + roll $10,%ebp + leal 1518500249(%edi,%eax,1),%edi + movl $-1,%eax + roll $15,%edi + addl %esi,%edi + # 24 + addl %edx,%esi + movl %ebx,%edx + subl %edi,%eax + andl %edi,%edx + andl %ebp,%eax + orl %eax,%edx + movl (%esp),%eax + roll $10,%ebx + leal 1518500249(%esi,%edx,1),%esi + movl $-1,%edx + roll $7,%esi + addl %ecx,%esi + # 25 + addl %eax,%ecx + movl %edi,%eax + subl %esi,%edx + andl %esi,%eax + andl %ebx,%edx + orl %edx,%eax + movl 36(%esp),%edx + roll $10,%edi + leal 1518500249(%ecx,%eax,1),%ecx + movl $-1,%eax + roll $12,%ecx + addl %ebp,%ecx + # 26 + addl %edx,%ebp + movl %esi,%edx + subl %ecx,%eax + andl %ecx,%edx + andl %edi,%eax + orl %eax,%edx + movl 20(%esp),%eax + roll $10,%esi + leal 1518500249(%ebp,%edx,1),%ebp + movl $-1,%edx + roll $15,%ebp + addl %ebx,%ebp + # 27 + addl %eax,%ebx + movl %ecx,%eax + subl %ebp,%edx + andl %ebp,%eax + andl %esi,%edx + orl %edx,%eax + movl 8(%esp),%edx + roll $10,%ecx + leal 1518500249(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $9,%ebx + addl %edi,%ebx + # 28 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%ebp + leal 1518500249(%edi,%edx,1),%edi + movl $-1,%edx + roll $11,%edi + addl %esi,%edi + # 29 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 44(%esp),%edx + roll $10,%ebx + leal 1518500249(%esi,%eax,1),%esi + movl $-1,%eax + roll $7,%esi + addl %ecx,%esi + # 30 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 32(%esp),%eax + roll $10,%edi + leal 1518500249(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $13,%ecx + addl %ebp,%ecx + # 31 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl $-1,%edx + roll $10,%esi + leal 1518500249(%ebp,%eax,1),%ebp + subl %ecx,%edx + roll $12,%ebp + addl %ebx,%ebp + # 32 + movl 12(%esp),%eax + orl 
%ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1859775393(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $11,%ebx + addl %edi,%ebx + # 33 + movl 40(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1859775393(%edi,%eax,1),%edi + subl %ebx,%edx + roll $13,%edi + addl %esi,%edi + # 34 + movl 56(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1859775393(%esi,%edx,1),%esi + subl %edi,%eax + roll $6,%esi + addl %ecx,%esi + # 35 + movl 16(%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1859775393(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $7,%ecx + addl %ebp,%ecx + # 36 + movl 36(%esp),%eax + orl %ecx,%edx + addl %eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1859775393(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $14,%ebp + addl %ebx,%ebp + # 37 + movl 60(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl $-1,%edx + roll $10,%ecx + leal 1859775393(%ebx,%eax,1),%ebx + subl %ebp,%edx + roll $9,%ebx + addl %edi,%ebx + # 38 + movl 32(%esp),%eax + orl %ebx,%edx + addl %eax,%edi + xorl %ecx,%edx + movl $-1,%eax + roll $10,%ebp + leal 1859775393(%edi,%edx,1),%edi + subl %ebx,%eax + roll $13,%edi + addl %esi,%edi + # 39 + movl 4(%esp),%edx + orl %edi,%eax + addl %edx,%esi + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ebx + leal 1859775393(%esi,%eax,1),%esi + subl %edi,%edx + roll $15,%esi + addl %ecx,%esi + # 40 + movl 8(%esp),%eax + orl %esi,%edx + addl %eax,%ecx + xorl %ebx,%edx + movl $-1,%eax + roll $10,%edi + leal 1859775393(%ecx,%edx,1),%ecx + subl %esi,%eax + roll $14,%ecx + addl %ebp,%ecx + # 41 + movl 28(%esp),%edx + orl %ecx,%eax + addl %edx,%ebp + xorl %edi,%eax + movl $-1,%edx + roll $10,%esi + leal 1859775393(%ebp,%eax,1),%ebp + subl %ecx,%edx + roll $8,%ebp + addl %ebx,%ebp + # 42 + movl (%esp),%eax + orl %ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1859775393(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $13,%ebx + addl %edi,%ebx + # 43 + movl 24(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1859775393(%edi,%eax,1),%edi + subl %ebx,%edx + roll $6,%edi + addl %esi,%edi + # 44 + movl 52(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1859775393(%esi,%edx,1),%esi + subl %edi,%eax + roll $5,%esi + addl %ecx,%esi + # 45 + movl 44(%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1859775393(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $12,%ecx + addl %ebp,%ecx + # 46 + movl 20(%esp),%eax + orl %ecx,%edx + addl %eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1859775393(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $7,%ebp + addl %ebx,%ebp + # 47 + movl 48(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl $-1,%edx + roll $10,%ecx + leal 1859775393(%ebx,%eax,1),%ebx + movl %ecx,%eax + roll $5,%ebx + addl %edi,%ebx + # 48 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $11,%edi + addl %esi,%edi + # 49 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 36(%esp),%eax + roll $10,%ebx + leal 2400959708(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $12,%esi + addl %ecx,%esi + # 
50 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 44(%esp),%eax + roll $10,%edi + leal 2400959708(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $14,%ecx + addl %ebp,%ecx + # 51 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 40(%esp),%eax + roll $10,%esi + leal 2400959708(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $15,%ebp + addl %ebx,%ebp + # 52 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl (%esp),%eax + roll $10,%ecx + leal 2400959708(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $14,%ebx + addl %edi,%ebx + # 53 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 32(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $15,%edi + addl %esi,%edi + # 54 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 48(%esp),%eax + roll $10,%ebx + leal 2400959708(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $9,%esi + addl %ecx,%esi + # 55 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 16(%esp),%eax + roll $10,%edi + leal 2400959708(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 56 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 52(%esp),%eax + roll $10,%esi + leal 2400959708(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $9,%ebp + addl %ebx,%ebp + # 57 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 12(%esp),%eax + roll $10,%ecx + leal 2400959708(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $14,%ebx + addl %edi,%ebx + # 58 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 28(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $5,%edi + addl %esi,%edi + # 59 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 60(%esp),%eax + roll $10,%ebx + leal 2400959708(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $6,%esi + addl %ecx,%esi + # 60 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%edi + leal 2400959708(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 61 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 20(%esp),%eax + roll $10,%esi + leal 2400959708(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $6,%ebp + addl %ebx,%ebp + # 62 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%ecx + leal 2400959708(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $5,%ebx + addl %edi,%ebx + # 63 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 8(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + subl %ebp,%edx + roll $12,%edi + addl %esi,%edi + # 64 + movl 16(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 2840853838(%esi,%edx,1),%esi + subl %ebx,%eax + roll $9,%esi + addl %ecx,%esi + # 65 + movl (%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 2840853838(%ecx,%eax,1),%ecx + subl 
%edi,%edx + roll $15,%ecx + addl %ebp,%ecx + # 66 + movl 20(%esp),%eax + orl %esi,%edx + addl %eax,%ebp + xorl %ecx,%edx + movl $-1,%eax + roll $10,%esi + leal 2840853838(%ebp,%edx,1),%ebp + subl %esi,%eax + roll $5,%ebp + addl %ebx,%ebp + # 67 + movl 36(%esp),%edx + orl %ecx,%eax + addl %edx,%ebx + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ecx + leal 2840853838(%ebx,%eax,1),%ebx + subl %ecx,%edx + roll $11,%ebx + addl %edi,%ebx + # 68 + movl 28(%esp),%eax + orl %ebp,%edx + addl %eax,%edi + xorl %ebx,%edx + movl $-1,%eax + roll $10,%ebp + leal 2840853838(%edi,%edx,1),%edi + subl %ebp,%eax + roll $6,%edi + addl %esi,%edi + # 69 + movl 48(%esp),%edx + orl %ebx,%eax + addl %edx,%esi + xorl %edi,%eax + movl $-1,%edx + roll $10,%ebx + leal 2840853838(%esi,%eax,1),%esi + subl %ebx,%edx + roll $8,%esi + addl %ecx,%esi + # 70 + movl 8(%esp),%eax + orl %edi,%edx + addl %eax,%ecx + xorl %esi,%edx + movl $-1,%eax + roll $10,%edi + leal 2840853838(%ecx,%edx,1),%ecx + subl %edi,%eax + roll $13,%ecx + addl %ebp,%ecx + # 71 + movl 40(%esp),%edx + orl %esi,%eax + addl %edx,%ebp + xorl %ecx,%eax + movl $-1,%edx + roll $10,%esi + leal 2840853838(%ebp,%eax,1),%ebp + subl %esi,%edx + roll $12,%ebp + addl %ebx,%ebp + # 72 + movl 56(%esp),%eax + orl %ecx,%edx + addl %eax,%ebx + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ecx + leal 2840853838(%ebx,%edx,1),%ebx + subl %ecx,%eax + roll $5,%ebx + addl %edi,%ebx + # 73 + movl 4(%esp),%edx + orl %ebp,%eax + addl %edx,%edi + xorl %ebx,%eax + movl $-1,%edx + roll $10,%ebp + leal 2840853838(%edi,%eax,1),%edi + subl %ebp,%edx + roll $12,%edi + addl %esi,%edi + # 74 + movl 12(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 2840853838(%esi,%edx,1),%esi + subl %ebx,%eax + roll $13,%esi + addl %ecx,%esi + # 75 + movl 32(%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 2840853838(%ecx,%eax,1),%ecx + subl %edi,%edx + roll $14,%ecx + addl %ebp,%ecx + # 76 + movl 44(%esp),%eax + orl %esi,%edx + addl %eax,%ebp + xorl %ecx,%edx + movl $-1,%eax + roll $10,%esi + leal 2840853838(%ebp,%edx,1),%ebp + subl %esi,%eax + roll $11,%ebp + addl %ebx,%ebp + # 77 + movl 24(%esp),%edx + orl %ecx,%eax + addl %edx,%ebx + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ecx + leal 2840853838(%ebx,%eax,1),%ebx + subl %ecx,%edx + roll $8,%ebx + addl %edi,%ebx + # 78 + movl 60(%esp),%eax + orl %ebp,%edx + addl %eax,%edi + xorl %ebx,%edx + movl $-1,%eax + roll $10,%ebp + leal 2840853838(%edi,%edx,1),%edi + subl %ebp,%eax + roll $5,%edi + addl %esi,%edi + # 79 + movl 52(%esp),%edx + orl %ebx,%eax + addl %edx,%esi + xorl %edi,%eax + movl 128(%esp),%edx + roll $10,%ebx + leal 2840853838(%esi,%eax,1),%esi + movl %ecx,64(%esp) + roll $6,%esi + addl %ecx,%esi + movl (%edx),%ecx + movl %esi,68(%esp) + movl %edi,72(%esp) + movl 4(%edx),%esi + movl %ebx,76(%esp) + movl 8(%edx),%edi + movl %ebp,80(%esp) + movl 12(%edx),%ebx + movl 16(%edx),%ebp + # 80 + movl $-1,%edx + subl %ebx,%edx + movl 20(%esp),%eax + orl %edi,%edx + addl %eax,%ecx + xorl %esi,%edx + movl $-1,%eax + roll $10,%edi + leal 1352829926(%ecx,%edx,1),%ecx + subl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 81 + movl 56(%esp),%edx + orl %esi,%eax + addl %edx,%ebp + xorl %ecx,%eax + movl $-1,%edx + roll $10,%esi + leal 1352829926(%ebp,%eax,1),%ebp + subl %esi,%edx + roll $9,%ebp + addl %ebx,%ebp + # 82 + movl 28(%esp),%eax + orl %ecx,%edx + addl %eax,%ebx + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ecx + leal 1352829926(%ebx,%edx,1),%ebx + subl 
%ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 83 + movl (%esp),%edx + orl %ebp,%eax + addl %edx,%edi + xorl %ebx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1352829926(%edi,%eax,1),%edi + subl %ebp,%edx + roll $11,%edi + addl %esi,%edi + # 84 + movl 36(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 1352829926(%esi,%edx,1),%esi + subl %ebx,%eax + roll $13,%esi + addl %ecx,%esi + # 85 + movl 8(%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 1352829926(%ecx,%eax,1),%ecx + subl %edi,%edx + roll $15,%ecx + addl %ebp,%ecx + # 86 + movl 44(%esp),%eax + orl %esi,%edx + addl %eax,%ebp + xorl %ecx,%edx + movl $-1,%eax + roll $10,%esi + leal 1352829926(%ebp,%edx,1),%ebp + subl %esi,%eax + roll $15,%ebp + addl %ebx,%ebp + # 87 + movl 16(%esp),%edx + orl %ecx,%eax + addl %edx,%ebx + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ecx + leal 1352829926(%ebx,%eax,1),%ebx + subl %ecx,%edx + roll $5,%ebx + addl %edi,%ebx + # 88 + movl 52(%esp),%eax + orl %ebp,%edx + addl %eax,%edi + xorl %ebx,%edx + movl $-1,%eax + roll $10,%ebp + leal 1352829926(%edi,%edx,1),%edi + subl %ebp,%eax + roll $7,%edi + addl %esi,%edi + # 89 + movl 24(%esp),%edx + orl %ebx,%eax + addl %edx,%esi + xorl %edi,%eax + movl $-1,%edx + roll $10,%ebx + leal 1352829926(%esi,%eax,1),%esi + subl %ebx,%edx + roll $7,%esi + addl %ecx,%esi + # 90 + movl 60(%esp),%eax + orl %edi,%edx + addl %eax,%ecx + xorl %esi,%edx + movl $-1,%eax + roll $10,%edi + leal 1352829926(%ecx,%edx,1),%ecx + subl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 91 + movl 32(%esp),%edx + orl %esi,%eax + addl %edx,%ebp + xorl %ecx,%eax + movl $-1,%edx + roll $10,%esi + leal 1352829926(%ebp,%eax,1),%ebp + subl %esi,%edx + roll $11,%ebp + addl %ebx,%ebp + # 92 + movl 4(%esp),%eax + orl %ecx,%edx + addl %eax,%ebx + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ecx + leal 1352829926(%ebx,%edx,1),%ebx + subl %ecx,%eax + roll $14,%ebx + addl %edi,%ebx + # 93 + movl 40(%esp),%edx + orl %ebp,%eax + addl %edx,%edi + xorl %ebx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1352829926(%edi,%eax,1),%edi + subl %ebp,%edx + roll $14,%edi + addl %esi,%edi + # 94 + movl 12(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 1352829926(%esi,%edx,1),%esi + subl %ebx,%eax + roll $12,%esi + addl %ecx,%esi + # 95 + movl 48(%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 1352829926(%ecx,%eax,1),%ecx + movl %edi,%eax + roll $6,%ecx + addl %ebp,%ecx + # 96 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $9,%ebp + addl %ebx,%ebp + # 97 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 44(%esp),%eax + roll $10,%ecx + leal 1548603684(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $13,%ebx + addl %edi,%ebx + # 98 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 12(%esp),%eax + roll $10,%ebp + leal 1548603684(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $15,%edi + addl %esi,%edi + # 99 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 28(%esp),%eax + roll $10,%ebx + leal 1548603684(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $7,%esi + addl %ecx,%esi + # 100 + subl %ebx,%edx + andl %esi,%eax + andl 
%edi,%edx + orl %eax,%edx + movl (%esp),%eax + roll $10,%edi + leal 1548603684(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $12,%ecx + addl %ebp,%ecx + # 101 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 52(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $8,%ebp + addl %ebx,%ebp + # 102 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 20(%esp),%eax + roll $10,%ecx + leal 1548603684(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 103 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 40(%esp),%eax + roll $10,%ebp + leal 1548603684(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $11,%edi + addl %esi,%edi + # 104 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%ebx + leal 1548603684(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $7,%esi + addl %ecx,%esi + # 105 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 60(%esp),%eax + roll $10,%edi + leal 1548603684(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $7,%ecx + addl %ebp,%ecx + # 106 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 32(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $12,%ebp + addl %ebx,%ebp + # 107 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 48(%esp),%eax + roll $10,%ecx + leal 1548603684(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $7,%ebx + addl %edi,%ebx + # 108 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 16(%esp),%eax + roll $10,%ebp + leal 1548603684(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $6,%edi + addl %esi,%edi + # 109 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 36(%esp),%eax + roll $10,%ebx + leal 1548603684(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $15,%esi + addl %ecx,%esi + # 110 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%edi + leal 1548603684(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $13,%ecx + addl %ebp,%ecx + # 111 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 8(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + subl %ecx,%edx + roll $11,%ebp + addl %ebx,%ebp + # 112 + movl 60(%esp),%eax + orl %ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1836072691(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $9,%ebx + addl %edi,%ebx + # 113 + movl 20(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1836072691(%edi,%eax,1),%edi + subl %ebx,%edx + roll $7,%edi + addl %esi,%edi + # 114 + movl 4(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1836072691(%esi,%edx,1),%esi + subl %edi,%eax + roll $15,%esi + addl %ecx,%esi + # 115 + movl 12(%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1836072691(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $11,%ecx + addl %ebp,%ecx + # 116 + movl 28(%esp),%eax + orl %ecx,%edx + addl 
%eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1836072691(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $8,%ebp + addl %ebx,%ebp + # 117 + movl 56(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl $-1,%edx + roll $10,%ecx + leal 1836072691(%ebx,%eax,1),%ebx + subl %ebp,%edx + roll $6,%ebx + addl %edi,%ebx + # 118 + movl 24(%esp),%eax + orl %ebx,%edx + addl %eax,%edi + xorl %ecx,%edx + movl $-1,%eax + roll $10,%ebp + leal 1836072691(%edi,%edx,1),%edi + subl %ebx,%eax + roll $6,%edi + addl %esi,%edi + # 119 + movl 36(%esp),%edx + orl %edi,%eax + addl %edx,%esi + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ebx + leal 1836072691(%esi,%eax,1),%esi + subl %edi,%edx + roll $14,%esi + addl %ecx,%esi + # 120 + movl 44(%esp),%eax + orl %esi,%edx + addl %eax,%ecx + xorl %ebx,%edx + movl $-1,%eax + roll $10,%edi + leal 1836072691(%ecx,%edx,1),%ecx + subl %esi,%eax + roll $12,%ecx + addl %ebp,%ecx + # 121 + movl 32(%esp),%edx + orl %ecx,%eax + addl %edx,%ebp + xorl %edi,%eax + movl $-1,%edx + roll $10,%esi + leal 1836072691(%ebp,%eax,1),%ebp + subl %ecx,%edx + roll $13,%ebp + addl %ebx,%ebp + # 122 + movl 48(%esp),%eax + orl %ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1836072691(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $5,%ebx + addl %edi,%ebx + # 123 + movl 8(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1836072691(%edi,%eax,1),%edi + subl %ebx,%edx + roll $14,%edi + addl %esi,%edi + # 124 + movl 40(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1836072691(%esi,%edx,1),%esi + subl %edi,%eax + roll $13,%esi + addl %ecx,%esi + # 125 + movl (%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1836072691(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $13,%ecx + addl %ebp,%ecx + # 126 + movl 16(%esp),%eax + orl %ecx,%edx + addl %eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1836072691(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $7,%ebp + addl %ebx,%ebp + # 127 + movl 52(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl 32(%esp),%edx + roll $10,%ecx + leal 1836072691(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $5,%ebx + addl %edi,%ebx + # 128 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%ebp + leal 2053994217(%edi,%edx,1),%edi + movl $-1,%edx + roll $15,%edi + addl %esi,%edi + # 129 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 16(%esp),%edx + roll $10,%ebx + leal 2053994217(%esi,%eax,1),%esi + movl $-1,%eax + roll $5,%esi + addl %ecx,%esi + # 130 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%edi + leal 2053994217(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $8,%ecx + addl %ebp,%ecx + # 131 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl 12(%esp),%edx + roll $10,%esi + leal 2053994217(%ebp,%eax,1),%ebp + movl $-1,%eax + roll $11,%ebp + addl %ebx,%ebp + # 132 + addl %edx,%ebx + movl %ecx,%edx + subl %ebp,%eax + andl %ebp,%edx + andl %esi,%eax + orl %eax,%edx + movl 44(%esp),%eax + roll $10,%ecx + leal 2053994217(%ebx,%edx,1),%ebx + movl $-1,%edx + roll $14,%ebx + addl %edi,%ebx + # 133 + addl %eax,%edi + movl %ebp,%eax + subl %ebx,%edx + andl %ebx,%eax + andl %ecx,%edx + 
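# note: with %edx preloaded with -1, the subl above yields the bitwise NOT, so the select function (x AND y) OR (NOT x AND z) is built without a NOT instruction +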
orl %edx,%eax + movl 60(%esp),%edx + roll $10,%ebp + leal 2053994217(%edi,%eax,1),%edi + movl $-1,%eax + roll $14,%edi + addl %esi,%edi + # 134 + addl %edx,%esi + movl %ebx,%edx + subl %edi,%eax + andl %edi,%edx + andl %ebp,%eax + orl %eax,%edx + movl (%esp),%eax + roll $10,%ebx + leal 2053994217(%esi,%edx,1),%esi + movl $-1,%edx + roll $6,%esi + addl %ecx,%esi + # 135 + addl %eax,%ecx + movl %edi,%eax + subl %esi,%edx + andl %esi,%eax + andl %ebx,%edx + orl %edx,%eax + movl 20(%esp),%edx + roll $10,%edi + leal 2053994217(%ecx,%eax,1),%ecx + movl $-1,%eax + roll $14,%ecx + addl %ebp,%ecx + # 136 + addl %edx,%ebp + movl %esi,%edx + subl %ecx,%eax + andl %ecx,%edx + andl %edi,%eax + orl %eax,%edx + movl 48(%esp),%eax + roll $10,%esi + leal 2053994217(%ebp,%edx,1),%ebp + movl $-1,%edx + roll $6,%ebp + addl %ebx,%ebp + # 137 + addl %eax,%ebx + movl %ecx,%eax + subl %ebp,%edx + andl %ebp,%eax + andl %esi,%edx + orl %edx,%eax + movl 8(%esp),%edx + roll $10,%ecx + leal 2053994217(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $9,%ebx + addl %edi,%ebx + # 138 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 52(%esp),%eax + roll $10,%ebp + leal 2053994217(%edi,%edx,1),%edi + movl $-1,%edx + roll $12,%edi + addl %esi,%edi + # 139 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 36(%esp),%edx + roll $10,%ebx + leal 2053994217(%esi,%eax,1),%esi + movl $-1,%eax + roll $9,%esi + addl %ecx,%esi + # 140 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 28(%esp),%eax + roll $10,%edi + leal 2053994217(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $12,%ecx + addl %ebp,%ecx + # 141 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl 40(%esp),%edx + roll $10,%esi + leal 2053994217(%ebp,%eax,1),%ebp + movl $-1,%eax + roll $5,%ebp + addl %ebx,%ebp + # 142 + addl %edx,%ebx + movl %ecx,%edx + subl %ebp,%eax + andl %ebp,%edx + andl %esi,%eax + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%ecx + leal 2053994217(%ebx,%edx,1),%ebx + movl $-1,%edx + roll $15,%ebx + addl %edi,%ebx + # 143 + addl %eax,%edi + movl %ebp,%eax + subl %ebx,%edx + andl %ebx,%eax + andl %ecx,%edx + orl %eax,%edx + movl %ebx,%eax + roll $10,%ebp + leal 2053994217(%edi,%edx,1),%edi + xorl %ebp,%eax + roll $8,%edi + addl %esi,%edi + # 144 + movl 48(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $8,%esi + addl %ecx,%esi + # 145 + xorl %ebx,%eax + movl 60(%esp),%edx + xorl %esi,%eax + addl %eax,%ecx + movl %esi,%eax + roll $10,%edi + addl %edx,%ecx + xorl %edi,%eax + roll $5,%ecx + addl %ebp,%ecx + # 146 + movl 40(%esp),%edx + xorl %ecx,%eax + addl %edx,%ebp + roll $10,%esi + addl %eax,%ebp + movl %ecx,%eax + roll $12,%ebp + addl %ebx,%ebp + # 147 + xorl %esi,%eax + movl 16(%esp),%edx + xorl %ebp,%eax + addl %eax,%ebx + movl %ebp,%eax + roll $10,%ecx + addl %edx,%ebx + xorl %ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 148 + movl 4(%esp),%edx + xorl %ebx,%eax + addl %edx,%edi + roll $10,%ebp + addl %eax,%edi + movl %ebx,%eax + roll $12,%edi + addl %esi,%edi + # 149 + xorl %ebp,%eax + movl 20(%esp),%edx + xorl %edi,%eax + addl %eax,%esi + movl %edi,%eax + roll $10,%ebx + addl %edx,%esi + xorl %ebx,%eax + roll $5,%esi + addl %ecx,%esi + # 150 + movl 32(%esp),%edx + xorl %esi,%eax + addl %edx,%ecx + roll $10,%edi + addl %eax,%ecx + movl %esi,%eax + roll $14,%ecx + addl %ebp,%ecx + 
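# note: rounds 144-159 of the parallel line use the plain parity function x XOR y XOR z, so no round constant is added in this final group +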
# 151 + xorl %edi,%eax + movl 28(%esp),%edx + xorl %ecx,%eax + addl %eax,%ebp + movl %ecx,%eax + roll $10,%esi + addl %edx,%ebp + xorl %esi,%eax + roll $6,%ebp + addl %ebx,%ebp + # 152 + movl 24(%esp),%edx + xorl %ebp,%eax + addl %edx,%ebx + roll $10,%ecx + addl %eax,%ebx + movl %ebp,%eax + roll $8,%ebx + addl %edi,%ebx + # 153 + xorl %ecx,%eax + movl 8(%esp),%edx + xorl %ebx,%eax + addl %eax,%edi + movl %ebx,%eax + roll $10,%ebp + addl %edx,%edi + xorl %ebp,%eax + roll $13,%edi + addl %esi,%edi + # 154 + movl 52(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $6,%esi + addl %ecx,%esi + # 155 + xorl %ebx,%eax + movl 56(%esp),%edx + xorl %esi,%eax + addl %eax,%ecx + movl %esi,%eax + roll $10,%edi + addl %edx,%ecx + xorl %edi,%eax + roll $5,%ecx + addl %ebp,%ecx + # 156 + movl (%esp),%edx + xorl %ecx,%eax + addl %edx,%ebp + roll $10,%esi + addl %eax,%ebp + movl %ecx,%eax + roll $15,%ebp + addl %ebx,%ebp + # 157 + xorl %esi,%eax + movl 12(%esp),%edx + xorl %ebp,%eax + addl %eax,%ebx + movl %ebp,%eax + roll $10,%ecx + addl %edx,%ebx + xorl %ecx,%eax + roll $13,%ebx + addl %edi,%ebx + # 158 + movl 36(%esp),%edx + xorl %ebx,%eax + addl %edx,%edi + roll $10,%ebp + addl %eax,%edi + movl %ebx,%eax + roll $11,%edi + addl %esi,%edi + # 159 + xorl %ebp,%eax + movl 44(%esp),%edx + xorl %edi,%eax + addl %eax,%esi + roll $10,%ebx + addl %edx,%esi + movl 128(%esp),%edx + roll $11,%esi + addl %ecx,%esi + movl 4(%edx),%eax + addl %eax,%ebx + movl 72(%esp),%eax + addl %eax,%ebx + movl 8(%edx),%eax + addl %eax,%ebp + movl 76(%esp),%eax + addl %eax,%ebp + movl 12(%edx),%eax + addl %eax,%ecx + movl 80(%esp),%eax + addl %eax,%ecx + movl 16(%edx),%eax + addl %eax,%esi + movl 64(%esp),%eax + addl %eax,%esi + movl (%edx),%eax + addl %eax,%edi + movl 68(%esp),%eax + addl %eax,%edi + movl 136(%esp),%eax + movl %ebx,(%edx) + movl %ebp,4(%edx) + movl %ecx,8(%edx) + subl $1,%eax + movl %esi,12(%edx) + movl %edi,16(%edx) + jle L001get_out + movl %eax,136(%esp) + movl %ecx,%edi + movl 132(%esp),%eax + movl %ebx,%ecx + addl $64,%eax + movl %ebp,%esi + movl %eax,132(%esp) + jmp L000start +L001get_out: + addl $108,%esp + popl %ebx + popl %ebp + popl %edi + popl %esi + ret diff --git a/deps/openssl/config/archs/BSD-x86/asm/crypto/sha/sha1-586.s b/deps/openssl/config/archs/BSD-x86/asm/crypto/sha/sha1-586.s new file mode 100644 index 00000000000000..42b2788199f6dc --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm/crypto/sha/sha1-586.s @@ -0,0 +1,3966 @@ +.text +.globl _sha1_block_data_order +.type _sha1_block_data_order,@function +.align 4 +_sha1_block_data_order: +L_sha1_block_data_order_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L000pic_point +L000pic_point: + popl %ebp + leal __GLOBAL_OFFSET_TABLE_+[.-L000pic_point](%ebp),%esi + movl _OPENSSL_ia32cap_P@GOT(%esi),%esi + leal LK_XX_XX-L000pic_point(%ebp),%ebp + movl (%esi),%eax + movl 4(%esi),%edx + testl $512,%edx + jz L001x86 + movl 8(%esi),%ecx + testl $16777216,%eax + jz L001x86 + testl $536870912,%ecx + jnz Lshaext_shortcut + andl $268435456,%edx + andl $1073741824,%eax + orl %edx,%eax + cmpl $1342177280,%eax + je Lavx_shortcut + jmp Lssse3_shortcut +.align 4,0x90 +L001x86: + movl 20(%esp),%ebp + movl 24(%esp),%esi + movl 28(%esp),%eax + subl $76,%esp + shll $6,%eax + addl %esi,%eax + movl %eax,104(%esp) + movl 16(%ebp),%edi + jmp L002loop +.align 4,0x90 +L002loop: + movl (%esi),%eax + movl 4(%esi),%ebx + movl 8(%esi),%ecx + movl 12(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + 
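# note: bswap converts the little-endian input words to the big-endian order SHA-1 expects before they are stored to the stack frame +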
bswap %edx + movl %eax,(%esp) + movl %ebx,4(%esp) + movl %ecx,8(%esp) + movl %edx,12(%esp) + movl 16(%esi),%eax + movl 20(%esi),%ebx + movl 24(%esi),%ecx + movl 28(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,16(%esp) + movl %ebx,20(%esp) + movl %ecx,24(%esp) + movl %edx,28(%esp) + movl 32(%esi),%eax + movl 36(%esi),%ebx + movl 40(%esi),%ecx + movl 44(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,32(%esp) + movl %ebx,36(%esp) + movl %ecx,40(%esp) + movl %edx,44(%esp) + movl 48(%esi),%eax + movl 52(%esi),%ebx + movl 56(%esi),%ecx + movl 60(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,48(%esp) + movl %ebx,52(%esp) + movl %ecx,56(%esp) + movl %edx,60(%esp) + movl %esi,100(%esp) + movl (%ebp),%eax + movl 4(%ebp),%ebx + movl 8(%ebp),%ecx + movl 12(%ebp),%edx + # 00_15 0 + movl %ecx,%esi + movl %eax,%ebp + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl (%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + # 00_15 1 + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 4(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + # 00_15 2 + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 8(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + # 00_15 3 + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 12(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + addl %ecx,%ebp + # 00_15 4 + movl %edi,%ebx + movl %ebp,%ecx + roll $5,%ebp + xorl %esi,%ebx + addl %eax,%ebp + movl 16(%esp),%eax + andl %edx,%ebx + rorl $2,%edx + xorl %esi,%ebx + leal 1518500249(%ebp,%eax,1),%ebp + addl %ebx,%ebp + # 00_15 5 + movl %edx,%eax + movl %ebp,%ebx + roll $5,%ebp + xorl %edi,%eax + addl %esi,%ebp + movl 20(%esp),%esi + andl %ecx,%eax + rorl $2,%ecx + xorl %edi,%eax + leal 1518500249(%ebp,%esi,1),%ebp + addl %eax,%ebp + # 00_15 6 + movl %ecx,%esi + movl %ebp,%eax + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl 24(%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + # 00_15 7 + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 28(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + # 00_15 8 + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 32(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + # 00_15 9 + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 36(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + addl %ecx,%ebp + # 00_15 10 + movl %edi,%ebx + movl %ebp,%ecx + roll $5,%ebp + xorl %esi,%ebx + addl %eax,%ebp + movl 40(%esp),%eax + andl %edx,%ebx + rorl $2,%edx + xorl %esi,%ebx + leal 1518500249(%ebp,%eax,1),%ebp + addl %ebx,%ebp + # 00_15 11 + movl %edx,%eax + movl %ebp,%ebx + roll $5,%ebp + xorl %edi,%eax + addl %esi,%ebp + movl 44(%esp),%esi + andl %ecx,%eax + rorl $2,%ecx + xorl %edi,%eax + leal 1518500249(%ebp,%esi,1),%ebp + addl %eax,%ebp + # 00_15 12 + movl %ecx,%esi + movl %ebp,%eax + roll $5,%ebp + xorl %edx,%esi + addl 
%edi,%ebp + movl 48(%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + # 00_15 13 + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 52(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + # 00_15 14 + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 56(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + # 00_15 15 + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 60(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + movl (%esp),%ebx + addl %ebp,%ecx + # 16_19 16 + movl %edi,%ebp + xorl 8(%esp),%ebx + xorl %esi,%ebp + xorl 32(%esp),%ebx + andl %edx,%ebp + xorl 52(%esp),%ebx + roll $1,%ebx + xorl %esi,%ebp + addl %ebp,%eax + movl %ecx,%ebp + rorl $2,%edx + movl %ebx,(%esp) + roll $5,%ebp + leal 1518500249(%ebx,%eax,1),%ebx + movl 4(%esp),%eax + addl %ebp,%ebx + # 16_19 17 + movl %edx,%ebp + xorl 12(%esp),%eax + xorl %edi,%ebp + xorl 36(%esp),%eax + andl %ecx,%ebp + xorl 56(%esp),%eax + roll $1,%eax + xorl %edi,%ebp + addl %ebp,%esi + movl %ebx,%ebp + rorl $2,%ecx + movl %eax,4(%esp) + roll $5,%ebp + leal 1518500249(%eax,%esi,1),%eax + movl 8(%esp),%esi + addl %ebp,%eax + # 16_19 18 + movl %ecx,%ebp + xorl 16(%esp),%esi + xorl %edx,%ebp + xorl 40(%esp),%esi + andl %ebx,%ebp + xorl 60(%esp),%esi + roll $1,%esi + xorl %edx,%ebp + addl %ebp,%edi + movl %eax,%ebp + rorl $2,%ebx + movl %esi,8(%esp) + roll $5,%ebp + leal 1518500249(%esi,%edi,1),%esi + movl 12(%esp),%edi + addl %ebp,%esi + # 16_19 19 + movl %ebx,%ebp + xorl 20(%esp),%edi + xorl %ecx,%ebp + xorl 44(%esp),%edi + andl %eax,%ebp + xorl (%esp),%edi + roll $1,%edi + xorl %ecx,%ebp + addl %ebp,%edx + movl %esi,%ebp + rorl $2,%eax + movl %edi,12(%esp) + roll $5,%ebp + leal 1518500249(%edi,%edx,1),%edi + movl 16(%esp),%edx + addl %ebp,%edi + # 20_39 20 + movl %esi,%ebp + xorl 24(%esp),%edx + xorl %eax,%ebp + xorl 48(%esp),%edx + xorl %ebx,%ebp + xorl 4(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,16(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 20(%esp),%ecx + addl %ebp,%edx + # 20_39 21 + movl %edi,%ebp + xorl 28(%esp),%ecx + xorl %esi,%ebp + xorl 52(%esp),%ecx + xorl %eax,%ebp + xorl 8(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,20(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 24(%esp),%ebx + addl %ebp,%ecx + # 20_39 22 + movl %edx,%ebp + xorl 32(%esp),%ebx + xorl %edi,%ebp + xorl 56(%esp),%ebx + xorl %esi,%ebp + xorl 12(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,24(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 28(%esp),%eax + addl %ebp,%ebx + # 20_39 23 + movl %ecx,%ebp + xorl 36(%esp),%eax + xorl %edx,%ebp + xorl 60(%esp),%eax + xorl %edi,%ebp + xorl 16(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,28(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 32(%esp),%esi + addl %ebp,%eax + # 20_39 24 + movl %ebx,%ebp + xorl 40(%esp),%esi + xorl %ecx,%ebp + xorl (%esp),%esi + xorl %edx,%ebp + xorl 20(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,32(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 
36(%esp),%edi + addl %ebp,%esi + # 20_39 25 + movl %eax,%ebp + xorl 44(%esp),%edi + xorl %ebx,%ebp + xorl 4(%esp),%edi + xorl %ecx,%ebp + xorl 24(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,36(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl 40(%esp),%edx + addl %ebp,%edi + # 20_39 26 + movl %esi,%ebp + xorl 48(%esp),%edx + xorl %eax,%ebp + xorl 8(%esp),%edx + xorl %ebx,%ebp + xorl 28(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,40(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 44(%esp),%ecx + addl %ebp,%edx + # 20_39 27 + movl %edi,%ebp + xorl 52(%esp),%ecx + xorl %esi,%ebp + xorl 12(%esp),%ecx + xorl %eax,%ebp + xorl 32(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,44(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 48(%esp),%ebx + addl %ebp,%ecx + # 20_39 28 + movl %edx,%ebp + xorl 56(%esp),%ebx + xorl %edi,%ebp + xorl 16(%esp),%ebx + xorl %esi,%ebp + xorl 36(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,48(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 52(%esp),%eax + addl %ebp,%ebx + # 20_39 29 + movl %ecx,%ebp + xorl 60(%esp),%eax + xorl %edx,%ebp + xorl 20(%esp),%eax + xorl %edi,%ebp + xorl 40(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,52(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 56(%esp),%esi + addl %ebp,%eax + # 20_39 30 + movl %ebx,%ebp + xorl (%esp),%esi + xorl %ecx,%ebp + xorl 24(%esp),%esi + xorl %edx,%ebp + xorl 44(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,56(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 60(%esp),%edi + addl %ebp,%esi + # 20_39 31 + movl %eax,%ebp + xorl 4(%esp),%edi + xorl %ebx,%ebp + xorl 28(%esp),%edi + xorl %ecx,%ebp + xorl 48(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,60(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl (%esp),%edx + addl %ebp,%edi + # 20_39 32 + movl %esi,%ebp + xorl 8(%esp),%edx + xorl %eax,%ebp + xorl 32(%esp),%edx + xorl %ebx,%ebp + xorl 52(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 4(%esp),%ecx + addl %ebp,%edx + # 20_39 33 + movl %edi,%ebp + xorl 12(%esp),%ecx + xorl %esi,%ebp + xorl 36(%esp),%ecx + xorl %eax,%ebp + xorl 56(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,4(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 8(%esp),%ebx + addl %ebp,%ecx + # 20_39 34 + movl %edx,%ebp + xorl 16(%esp),%ebx + xorl %edi,%ebp + xorl 40(%esp),%ebx + xorl %esi,%ebp + xorl 60(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,8(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 12(%esp),%eax + addl %ebp,%ebx + # 20_39 35 + movl %ecx,%ebp + xorl 20(%esp),%eax + xorl %edx,%ebp + xorl 44(%esp),%eax + xorl %edi,%ebp + xorl (%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,12(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 16(%esp),%esi + addl %ebp,%eax + # 20_39 36 + movl %ebx,%ebp + xorl 24(%esp),%esi + xorl %ecx,%ebp + xorl 48(%esp),%esi + xorl %edx,%ebp + xorl 4(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,16(%esp) + 
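# note: 1859775393 is 0x6ED9EBA1, the SHA-1 round constant for rounds 20-39 +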
leal 1859775393(%esi,%edi,1),%esi + movl 20(%esp),%edi + addl %ebp,%esi + # 20_39 37 + movl %eax,%ebp + xorl 28(%esp),%edi + xorl %ebx,%ebp + xorl 52(%esp),%edi + xorl %ecx,%ebp + xorl 8(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,20(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl 24(%esp),%edx + addl %ebp,%edi + # 20_39 38 + movl %esi,%ebp + xorl 32(%esp),%edx + xorl %eax,%ebp + xorl 56(%esp),%edx + xorl %ebx,%ebp + xorl 12(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,24(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 28(%esp),%ecx + addl %ebp,%edx + # 20_39 39 + movl %edi,%ebp + xorl 36(%esp),%ecx + xorl %esi,%ebp + xorl 60(%esp),%ecx + xorl %eax,%ebp + xorl 16(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,28(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 32(%esp),%ebx + addl %ebp,%ecx + # 40_59 40 + movl %edi,%ebp + xorl 40(%esp),%ebx + xorl %esi,%ebp + xorl (%esp),%ebx + andl %edx,%ebp + xorl 20(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,32(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 36(%esp),%eax + addl %ebp,%ebx + # 40_59 41 + movl %edx,%ebp + xorl 44(%esp),%eax + xorl %edi,%ebp + xorl 4(%esp),%eax + andl %ecx,%ebp + xorl 24(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,36(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 40(%esp),%esi + addl %ebp,%eax + # 40_59 42 + movl %ecx,%ebp + xorl 48(%esp),%esi + xorl %edx,%ebp + xorl 8(%esp),%esi + andl %ebx,%ebp + xorl 28(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,40(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 44(%esp),%edi + addl %ebp,%esi + # 40_59 43 + movl %ebx,%ebp + xorl 52(%esp),%edi + xorl %ecx,%ebp + xorl 12(%esp),%edi + andl %eax,%ebp + xorl 32(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,44(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 48(%esp),%edx + addl %ebp,%edi + # 40_59 44 + movl %eax,%ebp + xorl 56(%esp),%edx + xorl %ebx,%ebp + xorl 16(%esp),%edx + andl %esi,%ebp + xorl 36(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,48(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 52(%esp),%ecx + addl %ebp,%edx + # 40_59 45 + movl %esi,%ebp + xorl 60(%esp),%ecx + xorl %eax,%ebp + xorl 20(%esp),%ecx + andl %edi,%ebp + xorl 40(%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,52(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 56(%esp),%ebx + addl %ebp,%ecx + # 40_59 46 + movl %edi,%ebp + xorl (%esp),%ebx + xorl %esi,%ebp + xorl 24(%esp),%ebx + andl %edx,%ebp + xorl 44(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,56(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 60(%esp),%eax + addl %ebp,%ebx + # 40_59 47 + movl %edx,%ebp + xorl 4(%esp),%eax + xorl %edi,%ebp + xorl 28(%esp),%eax + andl %ecx,%ebp + xorl 
48(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,60(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl (%esp),%esi + addl %ebp,%eax + # 40_59 48 + movl %ecx,%ebp + xorl 8(%esp),%esi + xorl %edx,%ebp + xorl 32(%esp),%esi + andl %ebx,%ebp + xorl 52(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 4(%esp),%edi + addl %ebp,%esi + # 40_59 49 + movl %ebx,%ebp + xorl 12(%esp),%edi + xorl %ecx,%ebp + xorl 36(%esp),%edi + andl %eax,%ebp + xorl 56(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,4(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 8(%esp),%edx + addl %ebp,%edi + # 40_59 50 + movl %eax,%ebp + xorl 16(%esp),%edx + xorl %ebx,%ebp + xorl 40(%esp),%edx + andl %esi,%ebp + xorl 60(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,8(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 12(%esp),%ecx + addl %ebp,%edx + # 40_59 51 + movl %esi,%ebp + xorl 20(%esp),%ecx + xorl %eax,%ebp + xorl 44(%esp),%ecx + andl %edi,%ebp + xorl (%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,12(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 16(%esp),%ebx + addl %ebp,%ecx + # 40_59 52 + movl %edi,%ebp + xorl 24(%esp),%ebx + xorl %esi,%ebp + xorl 48(%esp),%ebx + andl %edx,%ebp + xorl 4(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,16(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 20(%esp),%eax + addl %ebp,%ebx + # 40_59 53 + movl %edx,%ebp + xorl 28(%esp),%eax + xorl %edi,%ebp + xorl 52(%esp),%eax + andl %ecx,%ebp + xorl 8(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,20(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 24(%esp),%esi + addl %ebp,%eax + # 40_59 54 + movl %ecx,%ebp + xorl 32(%esp),%esi + xorl %edx,%ebp + xorl 56(%esp),%esi + andl %ebx,%ebp + xorl 12(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,24(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 28(%esp),%edi + addl %ebp,%esi + # 40_59 55 + movl %ebx,%ebp + xorl 36(%esp),%edi + xorl %ecx,%ebp + xorl 60(%esp),%edi + andl %eax,%ebp + xorl 16(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,28(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 32(%esp),%edx + addl %ebp,%edi + # 40_59 56 + movl %eax,%ebp + xorl 40(%esp),%edx + xorl %ebx,%ebp + xorl (%esp),%edx + andl %esi,%ebp + xorl 20(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,32(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 36(%esp),%ecx + addl %ebp,%edx + # 40_59 57 + movl %esi,%ebp + xorl 44(%esp),%ecx + xorl %eax,%ebp + xorl 4(%esp),%ecx + andl %edi,%ebp + xorl 24(%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + 
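# note: rounds 40-59 use the majority function with constant 2400959708 (0x8F1BBCDC) +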
rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,36(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 40(%esp),%ebx + addl %ebp,%ecx + # 40_59 58 + movl %edi,%ebp + xorl 48(%esp),%ebx + xorl %esi,%ebp + xorl 8(%esp),%ebx + andl %edx,%ebp + xorl 28(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,40(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 44(%esp),%eax + addl %ebp,%ebx + # 40_59 59 + movl %edx,%ebp + xorl 52(%esp),%eax + xorl %edi,%ebp + xorl 12(%esp),%eax + andl %ecx,%ebp + xorl 32(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,44(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 48(%esp),%esi + addl %ebp,%eax + # 20_39 60 + movl %ebx,%ebp + xorl 56(%esp),%esi + xorl %ecx,%ebp + xorl 16(%esp),%esi + xorl %edx,%ebp + xorl 36(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,48(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 52(%esp),%edi + addl %ebp,%esi + # 20_39 61 + movl %eax,%ebp + xorl 60(%esp),%edi + xorl %ebx,%ebp + xorl 20(%esp),%edi + xorl %ecx,%ebp + xorl 40(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,52(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 56(%esp),%edx + addl %ebp,%edi + # 20_39 62 + movl %esi,%ebp + xorl (%esp),%edx + xorl %eax,%ebp + xorl 24(%esp),%edx + xorl %ebx,%ebp + xorl 44(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,56(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 60(%esp),%ecx + addl %ebp,%edx + # 20_39 63 + movl %edi,%ebp + xorl 4(%esp),%ecx + xorl %esi,%ebp + xorl 28(%esp),%ecx + xorl %eax,%ebp + xorl 48(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,60(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl (%esp),%ebx + addl %ebp,%ecx + # 20_39 64 + movl %edx,%ebp + xorl 8(%esp),%ebx + xorl %edi,%ebp + xorl 32(%esp),%ebx + xorl %esi,%ebp + xorl 52(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 4(%esp),%eax + addl %ebp,%ebx + # 20_39 65 + movl %ecx,%ebp + xorl 12(%esp),%eax + xorl %edx,%ebp + xorl 36(%esp),%eax + xorl %edi,%ebp + xorl 56(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,4(%esp) + leal 3395469782(%eax,%esi,1),%eax + movl 8(%esp),%esi + addl %ebp,%eax + # 20_39 66 + movl %ebx,%ebp + xorl 16(%esp),%esi + xorl %ecx,%ebp + xorl 40(%esp),%esi + xorl %edx,%ebp + xorl 60(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,8(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 12(%esp),%edi + addl %ebp,%esi + # 20_39 67 + movl %eax,%ebp + xorl 20(%esp),%edi + xorl %ebx,%ebp + xorl 44(%esp),%edi + xorl %ecx,%ebp + xorl (%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,12(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 16(%esp),%edx + addl %ebp,%edi + # 20_39 68 + movl %esi,%ebp + xorl 24(%esp),%edx + xorl %eax,%ebp + xorl 48(%esp),%edx + xorl %ebx,%ebp + xorl 4(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,16(%esp) + leal 3395469782(%edx,%ecx,1),%edx + 
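# note: 3395469782 is 0xCA62C1D6; rounds 60-79 reuse the 20_39 round body since both groups share the parity function, hence the 20_39 labels +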
movl 20(%esp),%ecx + addl %ebp,%edx + # 20_39 69 + movl %edi,%ebp + xorl 28(%esp),%ecx + xorl %esi,%ebp + xorl 52(%esp),%ecx + xorl %eax,%ebp + xorl 8(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,20(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl 24(%esp),%ebx + addl %ebp,%ecx + # 20_39 70 + movl %edx,%ebp + xorl 32(%esp),%ebx + xorl %edi,%ebp + xorl 56(%esp),%ebx + xorl %esi,%ebp + xorl 12(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,24(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 28(%esp),%eax + addl %ebp,%ebx + # 20_39 71 + movl %ecx,%ebp + xorl 36(%esp),%eax + xorl %edx,%ebp + xorl 60(%esp),%eax + xorl %edi,%ebp + xorl 16(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,28(%esp) + leal 3395469782(%eax,%esi,1),%eax + movl 32(%esp),%esi + addl %ebp,%eax + # 20_39 72 + movl %ebx,%ebp + xorl 40(%esp),%esi + xorl %ecx,%ebp + xorl (%esp),%esi + xorl %edx,%ebp + xorl 20(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,32(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 36(%esp),%edi + addl %ebp,%esi + # 20_39 73 + movl %eax,%ebp + xorl 44(%esp),%edi + xorl %ebx,%ebp + xorl 4(%esp),%edi + xorl %ecx,%ebp + xorl 24(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,36(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 40(%esp),%edx + addl %ebp,%edi + # 20_39 74 + movl %esi,%ebp + xorl 48(%esp),%edx + xorl %eax,%ebp + xorl 8(%esp),%edx + xorl %ebx,%ebp + xorl 28(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,40(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 44(%esp),%ecx + addl %ebp,%edx + # 20_39 75 + movl %edi,%ebp + xorl 52(%esp),%ecx + xorl %esi,%ebp + xorl 12(%esp),%ecx + xorl %eax,%ebp + xorl 32(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,44(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl 48(%esp),%ebx + addl %ebp,%ecx + # 20_39 76 + movl %edx,%ebp + xorl 56(%esp),%ebx + xorl %edi,%ebp + xorl 16(%esp),%ebx + xorl %esi,%ebp + xorl 36(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,48(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 52(%esp),%eax + addl %ebp,%ebx + # 20_39 77 + movl %ecx,%ebp + xorl 60(%esp),%eax + xorl %edx,%ebp + xorl 20(%esp),%eax + xorl %edi,%ebp + xorl 40(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + leal 3395469782(%eax,%esi,1),%eax + movl 56(%esp),%esi + addl %ebp,%eax + # 20_39 78 + movl %ebx,%ebp + xorl (%esp),%esi + xorl %ecx,%ebp + xorl 24(%esp),%esi + xorl %edx,%ebp + xorl 44(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + leal 3395469782(%esi,%edi,1),%esi + movl 60(%esp),%edi + addl %ebp,%esi + # 20_39 79 + movl %eax,%ebp + xorl 4(%esp),%edi + xorl %ebx,%ebp + xorl 28(%esp),%edi + xorl %ecx,%ebp + xorl 48(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + leal 3395469782(%edi,%edx,1),%edi + addl %ebp,%edi + movl 96(%esp),%ebp + movl 100(%esp),%edx + addl (%ebp),%edi + addl 4(%ebp),%esi + addl 8(%ebp),%eax + addl 12(%ebp),%ebx + addl 16(%ebp),%ecx + movl %edi,(%ebp) + addl $64,%edx + movl %esi,4(%ebp) + cmpl 104(%esp),%edx + movl %eax,8(%ebp) + movl %ecx,%edi + movl %ebx,12(%ebp) + movl %edx,%esi + 
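# note: the updated state words are written back and the loop repeats while the data pointer is still below the end mark computed at entry +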
movl %ecx,16(%ebp) + jb L002loop + addl $76,%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.type __sha1_block_data_order_shaext,@function +.align 4 +__sha1_block_data_order_shaext: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L003pic_point +L003pic_point: + popl %ebp + leal LK_XX_XX-L003pic_point(%ebp),%ebp +Lshaext_shortcut: + movl 20(%esp),%edi + movl %esp,%ebx + movl 24(%esp),%esi + movl 28(%esp),%ecx + subl $32,%esp + movdqu (%edi),%xmm0 + movd 16(%edi),%xmm1 + andl $-32,%esp + movdqa 80(%ebp),%xmm3 + movdqu (%esi),%xmm4 + pshufd $27,%xmm0,%xmm0 + movdqu 16(%esi),%xmm5 + pshufd $27,%xmm1,%xmm1 + movdqu 32(%esi),%xmm6 +.byte 102,15,56,0,227 + movdqu 48(%esi),%xmm7 +.byte 102,15,56,0,235 +.byte 102,15,56,0,243 +.byte 102,15,56,0,251 + jmp L004loop_shaext +.align 4,0x90 +L004loop_shaext: + decl %ecx + leal 64(%esi),%eax + movdqa %xmm1,(%esp) + paddd %xmm4,%xmm1 + cmovnel %eax,%esi + movdqa %xmm0,16(%esp) +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 + movdqu (%esi),%xmm4 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,213 + movdqu 16(%esi),%xmm5 +.byte 102,15,56,0,227 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,206 + movdqu 32(%esi),%xmm6 +.byte 102,15,56,0,235 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,215 + movdqu 48(%esi),%xmm7 +.byte 102,15,56,0,243 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 + movdqa (%esp),%xmm2 +.byte 102,15,56,0,251 +.byte 15,56,200,202 + paddd 16(%esp),%xmm0 + jnz L004loop_shaext + pshufd $27,%xmm0,%xmm0 + pshufd $27,%xmm1,%xmm1 + movdqu %xmm0,(%edi) + movd 
%xmm1,16(%edi) + movl %ebx,%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.type __sha1_block_data_order_ssse3,@function +.align 4 +__sha1_block_data_order_ssse3: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L005pic_point +L005pic_point: + popl %ebp + leal LK_XX_XX-L005pic_point(%ebp),%ebp +Lssse3_shortcut: + movdqa (%ebp),%xmm7 + movdqa 16(%ebp),%xmm0 + movdqa 32(%ebp),%xmm1 + movdqa 48(%ebp),%xmm2 + movdqa 64(%ebp),%xmm6 + movl 20(%esp),%edi + movl 24(%esp),%ebp + movl 28(%esp),%edx + movl %esp,%esi + subl $208,%esp + andl $-64,%esp + movdqa %xmm0,112(%esp) + movdqa %xmm1,128(%esp) + movdqa %xmm2,144(%esp) + shll $6,%edx + movdqa %xmm7,160(%esp) + addl %ebp,%edx + movdqa %xmm6,176(%esp) + addl $64,%ebp + movl %edi,192(%esp) + movl %ebp,196(%esp) + movl %edx,200(%esp) + movl %esi,204(%esp) + movl (%edi),%eax + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx + movl 16(%edi),%edi + movl %ebx,%esi + movdqu -64(%ebp),%xmm0 + movdqu -48(%ebp),%xmm1 + movdqu -32(%ebp),%xmm2 + movdqu -16(%ebp),%xmm3 +.byte 102,15,56,0,198 +.byte 102,15,56,0,206 +.byte 102,15,56,0,214 + movdqa %xmm7,96(%esp) +.byte 102,15,56,0,222 + paddd %xmm7,%xmm0 + paddd %xmm7,%xmm1 + paddd %xmm7,%xmm2 + movdqa %xmm0,(%esp) + psubd %xmm7,%xmm0 + movdqa %xmm1,16(%esp) + psubd %xmm7,%xmm1 + movdqa %xmm2,32(%esp) + movl %ecx,%ebp + psubd %xmm7,%xmm2 + xorl %edx,%ebp + pshufd $238,%xmm0,%xmm4 + andl %ebp,%esi + jmp L006loop +.align 4,0x90 +L006loop: + rorl $2,%ebx + xorl %edx,%esi + movl %eax,%ebp + punpcklqdq %xmm1,%xmm4 + movdqa %xmm3,%xmm6 + addl (%esp),%edi + xorl %ecx,%ebx + paddd %xmm3,%xmm7 + movdqa %xmm0,64(%esp) + roll $5,%eax + addl %esi,%edi + psrldq $4,%xmm6 + andl %ebx,%ebp + xorl %ecx,%ebx + pxor %xmm0,%xmm4 + addl %eax,%edi + rorl $7,%eax + pxor %xmm2,%xmm6 + xorl %ecx,%ebp + movl %edi,%esi + addl 4(%esp),%edx + pxor %xmm6,%xmm4 + xorl %ebx,%eax + roll $5,%edi + movdqa %xmm7,48(%esp) + addl %ebp,%edx + andl %eax,%esi + movdqa %xmm4,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + rorl $7,%edi + movdqa %xmm4,%xmm6 + xorl %ebx,%esi + pslldq $12,%xmm0 + paddd %xmm4,%xmm4 + movl %edx,%ebp + addl 8(%esp),%ecx + psrld $31,%xmm6 + xorl %eax,%edi + roll $5,%edx + movdqa %xmm0,%xmm7 + addl %esi,%ecx + andl %edi,%ebp + xorl %eax,%edi + psrld $30,%xmm0 + addl %edx,%ecx + rorl $7,%edx + por %xmm6,%xmm4 + xorl %eax,%ebp + movl %ecx,%esi + addl 12(%esp),%ebx + pslld $2,%xmm7 + xorl %edi,%edx + roll $5,%ecx + pxor %xmm0,%xmm4 + movdqa 96(%esp),%xmm0 + addl %ebp,%ebx + andl %edx,%esi + pxor %xmm7,%xmm4 + pshufd $238,%xmm1,%xmm5 + xorl %edi,%edx + addl %ecx,%ebx + rorl $7,%ecx + xorl %edi,%esi + movl %ebx,%ebp + punpcklqdq %xmm2,%xmm5 + movdqa %xmm4,%xmm7 + addl 16(%esp),%eax + xorl %edx,%ecx + paddd %xmm4,%xmm0 + movdqa %xmm1,80(%esp) + roll $5,%ebx + addl %esi,%eax + psrldq $4,%xmm7 + andl %ecx,%ebp + xorl %edx,%ecx + pxor %xmm1,%xmm5 + addl %ebx,%eax + rorl $7,%ebx + pxor %xmm3,%xmm7 + xorl %edx,%ebp + movl %eax,%esi + addl 20(%esp),%edi + pxor %xmm7,%xmm5 + xorl %ecx,%ebx + roll $5,%eax + movdqa %xmm0,(%esp) + addl %ebp,%edi + andl %ebx,%esi + movdqa %xmm5,%xmm1 + xorl %ecx,%ebx + addl %eax,%edi + rorl $7,%eax + movdqa %xmm5,%xmm7 + xorl %ecx,%esi + pslldq $12,%xmm1 + paddd %xmm5,%xmm5 + movl %edi,%ebp + addl 24(%esp),%edx + psrld $31,%xmm7 + xorl %ebx,%eax + roll $5,%edi + movdqa %xmm1,%xmm0 + addl %esi,%edx + andl %eax,%ebp + xorl %ebx,%eax + psrld $30,%xmm1 + addl %edi,%edx + rorl $7,%edi + por %xmm7,%xmm5 + xorl %ebx,%ebp + movl %edx,%esi + addl 28(%esp),%ecx + pslld $2,%xmm0 + xorl %eax,%edi + 
roll $5,%edx + pxor %xmm1,%xmm5 + movdqa 112(%esp),%xmm1 + addl %ebp,%ecx + andl %edi,%esi + pxor %xmm0,%xmm5 + pshufd $238,%xmm2,%xmm6 + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%edx + xorl %eax,%esi + movl %ecx,%ebp + punpcklqdq %xmm3,%xmm6 + movdqa %xmm5,%xmm0 + addl 32(%esp),%ebx + xorl %edi,%edx + paddd %xmm5,%xmm1 + movdqa %xmm2,96(%esp) + roll $5,%ecx + addl %esi,%ebx + psrldq $4,%xmm0 + andl %edx,%ebp + xorl %edi,%edx + pxor %xmm2,%xmm6 + addl %ecx,%ebx + rorl $7,%ecx + pxor %xmm4,%xmm0 + xorl %edi,%ebp + movl %ebx,%esi + addl 36(%esp),%eax + pxor %xmm0,%xmm6 + xorl %edx,%ecx + roll $5,%ebx + movdqa %xmm1,16(%esp) + addl %ebp,%eax + andl %ecx,%esi + movdqa %xmm6,%xmm2 + xorl %edx,%ecx + addl %ebx,%eax + rorl $7,%ebx + movdqa %xmm6,%xmm0 + xorl %edx,%esi + pslldq $12,%xmm2 + paddd %xmm6,%xmm6 + movl %eax,%ebp + addl 40(%esp),%edi + psrld $31,%xmm0 + xorl %ecx,%ebx + roll $5,%eax + movdqa %xmm2,%xmm1 + addl %esi,%edi + andl %ebx,%ebp + xorl %ecx,%ebx + psrld $30,%xmm2 + addl %eax,%edi + rorl $7,%eax + por %xmm0,%xmm6 + xorl %ecx,%ebp + movdqa 64(%esp),%xmm0 + movl %edi,%esi + addl 44(%esp),%edx + pslld $2,%xmm1 + xorl %ebx,%eax + roll $5,%edi + pxor %xmm2,%xmm6 + movdqa 112(%esp),%xmm2 + addl %ebp,%edx + andl %eax,%esi + pxor %xmm1,%xmm6 + pshufd $238,%xmm3,%xmm7 + xorl %ebx,%eax + addl %edi,%edx + rorl $7,%edi + xorl %ebx,%esi + movl %edx,%ebp + punpcklqdq %xmm4,%xmm7 + movdqa %xmm6,%xmm1 + addl 48(%esp),%ecx + xorl %eax,%edi + paddd %xmm6,%xmm2 + movdqa %xmm3,64(%esp) + roll $5,%edx + addl %esi,%ecx + psrldq $4,%xmm1 + andl %edi,%ebp + xorl %eax,%edi + pxor %xmm3,%xmm7 + addl %edx,%ecx + rorl $7,%edx + pxor %xmm5,%xmm1 + xorl %eax,%ebp + movl %ecx,%esi + addl 52(%esp),%ebx + pxor %xmm1,%xmm7 + xorl %edi,%edx + roll $5,%ecx + movdqa %xmm2,32(%esp) + addl %ebp,%ebx + andl %edx,%esi + movdqa %xmm7,%xmm3 + xorl %edi,%edx + addl %ecx,%ebx + rorl $7,%ecx + movdqa %xmm7,%xmm1 + xorl %edi,%esi + pslldq $12,%xmm3 + paddd %xmm7,%xmm7 + movl %ebx,%ebp + addl 56(%esp),%eax + psrld $31,%xmm1 + xorl %edx,%ecx + roll $5,%ebx + movdqa %xmm3,%xmm2 + addl %esi,%eax + andl %ecx,%ebp + xorl %edx,%ecx + psrld $30,%xmm3 + addl %ebx,%eax + rorl $7,%ebx + por %xmm1,%xmm7 + xorl %edx,%ebp + movdqa 80(%esp),%xmm1 + movl %eax,%esi + addl 60(%esp),%edi + pslld $2,%xmm2 + xorl %ecx,%ebx + roll $5,%eax + pxor %xmm3,%xmm7 + movdqa 112(%esp),%xmm3 + addl %ebp,%edi + andl %ebx,%esi + pxor %xmm2,%xmm7 + pshufd $238,%xmm6,%xmm2 + xorl %ecx,%ebx + addl %eax,%edi + rorl $7,%eax + pxor %xmm4,%xmm0 + punpcklqdq %xmm7,%xmm2 + xorl %ecx,%esi + movl %edi,%ebp + addl (%esp),%edx + pxor %xmm1,%xmm0 + movdqa %xmm4,80(%esp) + xorl %ebx,%eax + roll $5,%edi + movdqa %xmm3,%xmm4 + addl %esi,%edx + paddd %xmm7,%xmm3 + andl %eax,%ebp + pxor %xmm2,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + rorl $7,%edi + xorl %ebx,%ebp + movdqa %xmm0,%xmm2 + movdqa %xmm3,48(%esp) + movl %edx,%esi + addl 4(%esp),%ecx + xorl %eax,%edi + roll $5,%edx + pslld $2,%xmm0 + addl %ebp,%ecx + andl %edi,%esi + psrld $30,%xmm2 + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%edx + xorl %eax,%esi + movl %ecx,%ebp + addl 8(%esp),%ebx + xorl %edi,%edx + roll $5,%ecx + por %xmm2,%xmm0 + addl %esi,%ebx + andl %edx,%ebp + movdqa 96(%esp),%xmm2 + xorl %edi,%edx + addl %ecx,%ebx + addl 12(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + pshufd $238,%xmm7,%xmm3 + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + addl %ebx,%eax + addl 16(%esp),%edi + pxor %xmm5,%xmm1 + punpcklqdq %xmm0,%xmm3 + xorl %ecx,%esi + movl %eax,%ebp + roll $5,%eax + pxor %xmm2,%xmm1 + 
movdqa %xmm5,96(%esp) + addl %esi,%edi + xorl %ecx,%ebp + movdqa %xmm4,%xmm5 + rorl $7,%ebx + paddd %xmm0,%xmm4 + addl %eax,%edi + pxor %xmm3,%xmm1 + addl 20(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + movdqa %xmm1,%xmm3 + movdqa %xmm4,(%esp) + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + pslld $2,%xmm1 + addl 24(%esp),%ecx + xorl %eax,%esi + psrld $30,%xmm3 + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi + addl %edx,%ecx + por %xmm3,%xmm1 + addl 28(%esp),%ebx + xorl %edi,%ebp + movdqa 64(%esp),%xmm3 + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + pshufd $238,%xmm0,%xmm4 + addl %ecx,%ebx + addl 32(%esp),%eax + pxor %xmm6,%xmm2 + punpcklqdq %xmm1,%xmm4 + xorl %edx,%esi + movl %ebx,%ebp + roll $5,%ebx + pxor %xmm3,%xmm2 + movdqa %xmm6,64(%esp) + addl %esi,%eax + xorl %edx,%ebp + movdqa 128(%esp),%xmm6 + rorl $7,%ecx + paddd %xmm1,%xmm5 + addl %ebx,%eax + pxor %xmm4,%xmm2 + addl 36(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + roll $5,%eax + movdqa %xmm2,%xmm4 + movdqa %xmm5,16(%esp) + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + pslld $2,%xmm2 + addl 40(%esp),%edx + xorl %ebx,%esi + psrld $30,%xmm4 + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax + addl %edi,%edx + por %xmm4,%xmm2 + addl 44(%esp),%ecx + xorl %eax,%ebp + movdqa 80(%esp),%xmm4 + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + pshufd $238,%xmm1,%xmm5 + addl %edx,%ecx + addl 48(%esp),%ebx + pxor %xmm7,%xmm3 + punpcklqdq %xmm2,%xmm5 + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + pxor %xmm4,%xmm3 + movdqa %xmm7,80(%esp) + addl %esi,%ebx + xorl %edi,%ebp + movdqa %xmm6,%xmm7 + rorl $7,%edx + paddd %xmm2,%xmm6 + addl %ecx,%ebx + pxor %xmm5,%xmm3 + addl 52(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + movdqa %xmm3,%xmm5 + movdqa %xmm6,32(%esp) + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + addl %ebx,%eax + pslld $2,%xmm3 + addl 56(%esp),%edi + xorl %ecx,%esi + psrld $30,%xmm5 + movl %eax,%ebp + roll $5,%eax + addl %esi,%edi + xorl %ecx,%ebp + rorl $7,%ebx + addl %eax,%edi + por %xmm5,%xmm3 + addl 60(%esp),%edx + xorl %ebx,%ebp + movdqa 96(%esp),%xmm5 + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + pshufd $238,%xmm2,%xmm6 + addl %edi,%edx + addl (%esp),%ecx + pxor %xmm0,%xmm4 + punpcklqdq %xmm3,%xmm6 + xorl %eax,%esi + movl %edx,%ebp + roll $5,%edx + pxor %xmm5,%xmm4 + movdqa %xmm0,96(%esp) + addl %esi,%ecx + xorl %eax,%ebp + movdqa %xmm7,%xmm0 + rorl $7,%edi + paddd %xmm3,%xmm7 + addl %edx,%ecx + pxor %xmm6,%xmm4 + addl 4(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + roll $5,%ecx + movdqa %xmm4,%xmm6 + movdqa %xmm7,48(%esp) + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + addl %ecx,%ebx + pslld $2,%xmm4 + addl 8(%esp),%eax + xorl %edx,%esi + psrld $30,%xmm6 + movl %ebx,%ebp + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + addl %ebx,%eax + por %xmm6,%xmm4 + addl 12(%esp),%edi + xorl %ecx,%ebp + movdqa 64(%esp),%xmm6 + movl %eax,%esi + roll $5,%eax + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + pshufd $238,%xmm3,%xmm7 + addl %eax,%edi + addl 16(%esp),%edx + pxor %xmm1,%xmm5 + punpcklqdq %xmm4,%xmm7 + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + pxor %xmm6,%xmm5 + movdqa %xmm1,64(%esp) + addl %esi,%edx + xorl %ebx,%ebp + movdqa %xmm0,%xmm1 + rorl $7,%eax + paddd %xmm4,%xmm0 + addl %edi,%edx + pxor %xmm7,%xmm5 + addl 20(%esp),%ecx + xorl %eax,%ebp + movl 
%edx,%esi + roll $5,%edx + movdqa %xmm5,%xmm7 + movdqa %xmm0,(%esp) + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + addl %edx,%ecx + pslld $2,%xmm5 + addl 24(%esp),%ebx + xorl %edi,%esi + psrld $30,%xmm7 + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + por %xmm7,%xmm5 + addl 28(%esp),%eax + movdqa 80(%esp),%xmm7 + rorl $7,%ecx + movl %ebx,%esi + xorl %edx,%ebp + roll $5,%ebx + pshufd $238,%xmm4,%xmm0 + addl %ebp,%eax + xorl %ecx,%esi + xorl %edx,%ecx + addl %ebx,%eax + addl 32(%esp),%edi + pxor %xmm2,%xmm6 + punpcklqdq %xmm5,%xmm0 + andl %ecx,%esi + xorl %edx,%ecx + rorl $7,%ebx + pxor %xmm7,%xmm6 + movdqa %xmm2,80(%esp) + movl %eax,%ebp + xorl %ecx,%esi + roll $5,%eax + movdqa %xmm1,%xmm2 + addl %esi,%edi + paddd %xmm5,%xmm1 + xorl %ebx,%ebp + pxor %xmm0,%xmm6 + xorl %ecx,%ebx + addl %eax,%edi + addl 36(%esp),%edx + andl %ebx,%ebp + movdqa %xmm6,%xmm0 + movdqa %xmm1,16(%esp) + xorl %ecx,%ebx + rorl $7,%eax + movl %edi,%esi + xorl %ebx,%ebp + roll $5,%edi + pslld $2,%xmm6 + addl %ebp,%edx + xorl %eax,%esi + psrld $30,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + addl 40(%esp),%ecx + andl %eax,%esi + xorl %ebx,%eax + rorl $7,%edi + por %xmm0,%xmm6 + movl %edx,%ebp + xorl %eax,%esi + movdqa 96(%esp),%xmm0 + roll $5,%edx + addl %esi,%ecx + xorl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + pshufd $238,%xmm5,%xmm1 + addl 44(%esp),%ebx + andl %edi,%ebp + xorl %eax,%edi + rorl $7,%edx + movl %ecx,%esi + xorl %edi,%ebp + roll $5,%ecx + addl %ebp,%ebx + xorl %edx,%esi + xorl %edi,%edx + addl %ecx,%ebx + addl 48(%esp),%eax + pxor %xmm3,%xmm7 + punpcklqdq %xmm6,%xmm1 + andl %edx,%esi + xorl %edi,%edx + rorl $7,%ecx + pxor %xmm0,%xmm7 + movdqa %xmm3,96(%esp) + movl %ebx,%ebp + xorl %edx,%esi + roll $5,%ebx + movdqa 144(%esp),%xmm3 + addl %esi,%eax + paddd %xmm6,%xmm2 + xorl %ecx,%ebp + pxor %xmm1,%xmm7 + xorl %edx,%ecx + addl %ebx,%eax + addl 52(%esp),%edi + andl %ecx,%ebp + movdqa %xmm7,%xmm1 + movdqa %xmm2,32(%esp) + xorl %edx,%ecx + rorl $7,%ebx + movl %eax,%esi + xorl %ecx,%ebp + roll $5,%eax + pslld $2,%xmm7 + addl %ebp,%edi + xorl %ebx,%esi + psrld $30,%xmm1 + xorl %ecx,%ebx + addl %eax,%edi + addl 56(%esp),%edx + andl %ebx,%esi + xorl %ecx,%ebx + rorl $7,%eax + por %xmm1,%xmm7 + movl %edi,%ebp + xorl %ebx,%esi + movdqa 64(%esp),%xmm1 + roll $5,%edi + addl %esi,%edx + xorl %eax,%ebp + xorl %ebx,%eax + addl %edi,%edx + pshufd $238,%xmm6,%xmm2 + addl 60(%esp),%ecx + andl %eax,%ebp + xorl %ebx,%eax + rorl $7,%edi + movl %edx,%esi + xorl %eax,%ebp + roll $5,%edx + addl %ebp,%ecx + xorl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + addl (%esp),%ebx + pxor %xmm4,%xmm0 + punpcklqdq %xmm7,%xmm2 + andl %edi,%esi + xorl %eax,%edi + rorl $7,%edx + pxor %xmm1,%xmm0 + movdqa %xmm4,64(%esp) + movl %ecx,%ebp + xorl %edi,%esi + roll $5,%ecx + movdqa %xmm3,%xmm4 + addl %esi,%ebx + paddd %xmm7,%xmm3 + xorl %edx,%ebp + pxor %xmm2,%xmm0 + xorl %edi,%edx + addl %ecx,%ebx + addl 4(%esp),%eax + andl %edx,%ebp + movdqa %xmm0,%xmm2 + movdqa %xmm3,48(%esp) + xorl %edi,%edx + rorl $7,%ecx + movl %ebx,%esi + xorl %edx,%ebp + roll $5,%ebx + pslld $2,%xmm0 + addl %ebp,%eax + xorl %ecx,%esi + psrld $30,%xmm2 + xorl %edx,%ecx + addl %ebx,%eax + addl 8(%esp),%edi + andl %ecx,%esi + xorl %edx,%ecx + rorl $7,%ebx + por %xmm2,%xmm0 + movl %eax,%ebp + xorl %ecx,%esi + movdqa 80(%esp),%xmm2 + roll $5,%eax + addl %esi,%edi + xorl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + pshufd $238,%xmm7,%xmm3 + addl 12(%esp),%edx + andl %ebx,%ebp + xorl %ecx,%ebx + rorl $7,%eax + movl %edi,%esi 
+ xorl %ebx,%ebp + roll $5,%edi + addl %ebp,%edx + xorl %eax,%esi + xorl %ebx,%eax + addl %edi,%edx + addl 16(%esp),%ecx + pxor %xmm5,%xmm1 + punpcklqdq %xmm0,%xmm3 + andl %eax,%esi + xorl %ebx,%eax + rorl $7,%edi + pxor %xmm2,%xmm1 + movdqa %xmm5,80(%esp) + movl %edx,%ebp + xorl %eax,%esi + roll $5,%edx + movdqa %xmm4,%xmm5 + addl %esi,%ecx + paddd %xmm0,%xmm4 + xorl %edi,%ebp + pxor %xmm3,%xmm1 + xorl %eax,%edi + addl %edx,%ecx + addl 20(%esp),%ebx + andl %edi,%ebp + movdqa %xmm1,%xmm3 + movdqa %xmm4,(%esp) + xorl %eax,%edi + rorl $7,%edx + movl %ecx,%esi + xorl %edi,%ebp + roll $5,%ecx + pslld $2,%xmm1 + addl %ebp,%ebx + xorl %edx,%esi + psrld $30,%xmm3 + xorl %edi,%edx + addl %ecx,%ebx + addl 24(%esp),%eax + andl %edx,%esi + xorl %edi,%edx + rorl $7,%ecx + por %xmm3,%xmm1 + movl %ebx,%ebp + xorl %edx,%esi + movdqa 96(%esp),%xmm3 + roll $5,%ebx + addl %esi,%eax + xorl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + pshufd $238,%xmm0,%xmm4 + addl 28(%esp),%edi + andl %ecx,%ebp + xorl %edx,%ecx + rorl $7,%ebx + movl %eax,%esi + xorl %ecx,%ebp + roll $5,%eax + addl %ebp,%edi + xorl %ebx,%esi + xorl %ecx,%ebx + addl %eax,%edi + addl 32(%esp),%edx + pxor %xmm6,%xmm2 + punpcklqdq %xmm1,%xmm4 + andl %ebx,%esi + xorl %ecx,%ebx + rorl $7,%eax + pxor %xmm3,%xmm2 + movdqa %xmm6,96(%esp) + movl %edi,%ebp + xorl %ebx,%esi + roll $5,%edi + movdqa %xmm5,%xmm6 + addl %esi,%edx + paddd %xmm1,%xmm5 + xorl %eax,%ebp + pxor %xmm4,%xmm2 + xorl %ebx,%eax + addl %edi,%edx + addl 36(%esp),%ecx + andl %eax,%ebp + movdqa %xmm2,%xmm4 + movdqa %xmm5,16(%esp) + xorl %ebx,%eax + rorl $7,%edi + movl %edx,%esi + xorl %eax,%ebp + roll $5,%edx + pslld $2,%xmm2 + addl %ebp,%ecx + xorl %edi,%esi + psrld $30,%xmm4 + xorl %eax,%edi + addl %edx,%ecx + addl 40(%esp),%ebx + andl %edi,%esi + xorl %eax,%edi + rorl $7,%edx + por %xmm4,%xmm2 + movl %ecx,%ebp + xorl %edi,%esi + movdqa 64(%esp),%xmm4 + roll $5,%ecx + addl %esi,%ebx + xorl %edx,%ebp + xorl %edi,%edx + addl %ecx,%ebx + pshufd $238,%xmm1,%xmm5 + addl 44(%esp),%eax + andl %edx,%ebp + xorl %edi,%edx + rorl $7,%ecx + movl %ebx,%esi + xorl %edx,%ebp + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + addl %ebx,%eax + addl 48(%esp),%edi + pxor %xmm7,%xmm3 + punpcklqdq %xmm2,%xmm5 + xorl %ecx,%esi + movl %eax,%ebp + roll $5,%eax + pxor %xmm4,%xmm3 + movdqa %xmm7,64(%esp) + addl %esi,%edi + xorl %ecx,%ebp + movdqa %xmm6,%xmm7 + rorl $7,%ebx + paddd %xmm2,%xmm6 + addl %eax,%edi + pxor %xmm5,%xmm3 + addl 52(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + movdqa %xmm3,%xmm5 + movdqa %xmm6,32(%esp) + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + pslld $2,%xmm3 + addl 56(%esp),%ecx + xorl %eax,%esi + psrld $30,%xmm5 + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi + addl %edx,%ecx + por %xmm5,%xmm3 + addl 60(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + addl %ecx,%ebx + addl (%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + paddd %xmm3,%xmm7 + addl %ebx,%eax + addl 4(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + movdqa %xmm7,48(%esp) + roll $5,%eax + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + addl 8(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax + addl %edi,%edx + addl 12(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + addl %edx,%ecx + movl 
196(%esp),%ebp + cmpl 200(%esp),%ebp + je L007done + movdqa 160(%esp),%xmm7 + movdqa 176(%esp),%xmm6 + movdqu (%ebp),%xmm0 + movdqu 16(%ebp),%xmm1 + movdqu 32(%ebp),%xmm2 + movdqu 48(%ebp),%xmm3 + addl $64,%ebp +.byte 102,15,56,0,198 + movl %ebp,196(%esp) + movdqa %xmm7,96(%esp) + addl 16(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx +.byte 102,15,56,0,206 + addl %ecx,%ebx + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + paddd %xmm7,%xmm0 + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + movdqa %xmm0,(%esp) + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + psubd %xmm7,%xmm0 + roll $5,%eax + addl %esi,%edi + xorl %ecx,%ebp + rorl $7,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi +.byte 102,15,56,0,214 + addl %edx,%ecx + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + paddd %xmm7,%xmm1 + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + movdqa %xmm1,16(%esp) + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + psubd %xmm7,%xmm1 + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + roll $5,%eax + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax +.byte 102,15,56,0,222 + addl %edi,%edx + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + paddd %xmm7,%xmm2 + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + movdqa %xmm2,32(%esp) + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + psubd %xmm7,%xmm2 + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + rorl $7,%ecx + addl %ebx,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %ecx,%ebx + movl %edx,12(%ebp) + xorl %edx,%ebx + movl %edi,16(%ebp) + movl %esi,%ebp + pshufd $238,%xmm0,%xmm4 + andl %ebx,%esi + movl %ebp,%ebx + jmp L006loop +.align 4,0x90 +L007done: + addl 16(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + roll $5,%eax + addl %esi,%edi + xorl %ecx,%ebp + rorl $7,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi + addl %edx,%ecx + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + roll $5,%eax + 
addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax + addl %edi,%edx + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + rorl $7,%ecx + addl %ebx,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + movl 204(%esp),%esp + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %edx,12(%ebp) + movl %edi,16(%ebp) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.type __sha1_block_data_order_avx,@function +.align 4 +__sha1_block_data_order_avx: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L008pic_point +L008pic_point: + popl %ebp + leal LK_XX_XX-L008pic_point(%ebp),%ebp +Lavx_shortcut: + vzeroall + vmovdqa (%ebp),%xmm7 + vmovdqa 16(%ebp),%xmm0 + vmovdqa 32(%ebp),%xmm1 + vmovdqa 48(%ebp),%xmm2 + vmovdqa 64(%ebp),%xmm6 + movl 20(%esp),%edi + movl 24(%esp),%ebp + movl 28(%esp),%edx + movl %esp,%esi + subl $208,%esp + andl $-64,%esp + vmovdqa %xmm0,112(%esp) + vmovdqa %xmm1,128(%esp) + vmovdqa %xmm2,144(%esp) + shll $6,%edx + vmovdqa %xmm7,160(%esp) + addl %ebp,%edx + vmovdqa %xmm6,176(%esp) + addl $64,%ebp + movl %edi,192(%esp) + movl %ebp,196(%esp) + movl %edx,200(%esp) + movl %esi,204(%esp) + movl (%edi),%eax + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx + movl 16(%edi),%edi + movl %ebx,%esi + vmovdqu -64(%ebp),%xmm0 + vmovdqu -48(%ebp),%xmm1 + vmovdqu -32(%ebp),%xmm2 + vmovdqu -16(%ebp),%xmm3 + vpshufb %xmm6,%xmm0,%xmm0 + vpshufb %xmm6,%xmm1,%xmm1 + vpshufb %xmm6,%xmm2,%xmm2 + vmovdqa %xmm7,96(%esp) + vpshufb %xmm6,%xmm3,%xmm3 + vpaddd %xmm7,%xmm0,%xmm4 + vpaddd %xmm7,%xmm1,%xmm5 + vpaddd %xmm7,%xmm2,%xmm6 + vmovdqa %xmm4,(%esp) + movl %ecx,%ebp + vmovdqa %xmm5,16(%esp) + xorl %edx,%ebp + vmovdqa %xmm6,32(%esp) + andl %ebp,%esi + jmp L009loop +.align 4,0x90 +L009loop: + shrdl $2,%ebx,%ebx + xorl %edx,%esi + vpalignr $8,%xmm0,%xmm1,%xmm4 + movl %eax,%ebp + addl (%esp),%edi + vpaddd %xmm3,%xmm7,%xmm7 + vmovdqa %xmm0,64(%esp) + xorl %ecx,%ebx + shldl $5,%eax,%eax + vpsrldq $4,%xmm3,%xmm6 + addl %esi,%edi + andl %ebx,%ebp + vpxor %xmm0,%xmm4,%xmm4 + xorl %ecx,%ebx + addl %eax,%edi + vpxor %xmm2,%xmm6,%xmm6 + shrdl $7,%eax,%eax + xorl %ecx,%ebp + vmovdqa %xmm7,48(%esp) + movl %edi,%esi + addl 4(%esp),%edx + vpxor %xmm6,%xmm4,%xmm4 + xorl %ebx,%eax + shldl $5,%edi,%edi + addl %ebp,%edx + andl %eax,%esi + vpsrld $31,%xmm4,%xmm6 + xorl %ebx,%eax + addl %edi,%edx + shrdl $7,%edi,%edi + xorl %ebx,%esi + vpslldq $12,%xmm4,%xmm0 + vpaddd %xmm4,%xmm4,%xmm4 + movl %edx,%ebp + addl 8(%esp),%ecx + xorl %eax,%edi + shldl $5,%edx,%edx + vpsrld $30,%xmm0,%xmm7 + vpor %xmm6,%xmm4,%xmm4 + addl %esi,%ecx + andl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + vpslld $2,%xmm0,%xmm0 + shrdl $7,%edx,%edx + xorl %eax,%ebp + vpxor %xmm7,%xmm4,%xmm4 + movl %ecx,%esi + addl 12(%esp),%ebx + xorl %edi,%edx + shldl $5,%ecx,%ecx + vpxor %xmm0,%xmm4,%xmm4 + addl %ebp,%ebx + andl %edx,%esi + vmovdqa 96(%esp),%xmm0 + xorl %edi,%edx + addl %ecx,%ebx + shrdl $7,%ecx,%ecx + xorl %edi,%esi + vpalignr $8,%xmm1,%xmm2,%xmm5 + movl %ebx,%ebp + addl 16(%esp),%eax + vpaddd %xmm4,%xmm0,%xmm0 + 
vmovdqa %xmm1,80(%esp) + xorl %edx,%ecx + shldl $5,%ebx,%ebx + vpsrldq $4,%xmm4,%xmm7 + addl %esi,%eax + andl %ecx,%ebp + vpxor %xmm1,%xmm5,%xmm5 + xorl %edx,%ecx + addl %ebx,%eax + vpxor %xmm3,%xmm7,%xmm7 + shrdl $7,%ebx,%ebx + xorl %edx,%ebp + vmovdqa %xmm0,(%esp) + movl %eax,%esi + addl 20(%esp),%edi + vpxor %xmm7,%xmm5,%xmm5 + xorl %ecx,%ebx + shldl $5,%eax,%eax + addl %ebp,%edi + andl %ebx,%esi + vpsrld $31,%xmm5,%xmm7 + xorl %ecx,%ebx + addl %eax,%edi + shrdl $7,%eax,%eax + xorl %ecx,%esi + vpslldq $12,%xmm5,%xmm1 + vpaddd %xmm5,%xmm5,%xmm5 + movl %edi,%ebp + addl 24(%esp),%edx + xorl %ebx,%eax + shldl $5,%edi,%edi + vpsrld $30,%xmm1,%xmm0 + vpor %xmm7,%xmm5,%xmm5 + addl %esi,%edx + andl %eax,%ebp + xorl %ebx,%eax + addl %edi,%edx + vpslld $2,%xmm1,%xmm1 + shrdl $7,%edi,%edi + xorl %ebx,%ebp + vpxor %xmm0,%xmm5,%xmm5 + movl %edx,%esi + addl 28(%esp),%ecx + xorl %eax,%edi + shldl $5,%edx,%edx + vpxor %xmm1,%xmm5,%xmm5 + addl %ebp,%ecx + andl %edi,%esi + vmovdqa 112(%esp),%xmm1 + xorl %eax,%edi + addl %edx,%ecx + shrdl $7,%edx,%edx + xorl %eax,%esi + vpalignr $8,%xmm2,%xmm3,%xmm6 + movl %ecx,%ebp + addl 32(%esp),%ebx + vpaddd %xmm5,%xmm1,%xmm1 + vmovdqa %xmm2,96(%esp) + xorl %edi,%edx + shldl $5,%ecx,%ecx + vpsrldq $4,%xmm5,%xmm0 + addl %esi,%ebx + andl %edx,%ebp + vpxor %xmm2,%xmm6,%xmm6 + xorl %edi,%edx + addl %ecx,%ebx + vpxor %xmm4,%xmm0,%xmm0 + shrdl $7,%ecx,%ecx + xorl %edi,%ebp + vmovdqa %xmm1,16(%esp) + movl %ebx,%esi + addl 36(%esp),%eax + vpxor %xmm0,%xmm6,%xmm6 + xorl %edx,%ecx + shldl $5,%ebx,%ebx + addl %ebp,%eax + andl %ecx,%esi + vpsrld $31,%xmm6,%xmm0 + xorl %edx,%ecx + addl %ebx,%eax + shrdl $7,%ebx,%ebx + xorl %edx,%esi + vpslldq $12,%xmm6,%xmm2 + vpaddd %xmm6,%xmm6,%xmm6 + movl %eax,%ebp + addl 40(%esp),%edi + xorl %ecx,%ebx + shldl $5,%eax,%eax + vpsrld $30,%xmm2,%xmm1 + vpor %xmm0,%xmm6,%xmm6 + addl %esi,%edi + andl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + vpslld $2,%xmm2,%xmm2 + vmovdqa 64(%esp),%xmm0 + shrdl $7,%eax,%eax + xorl %ecx,%ebp + vpxor %xmm1,%xmm6,%xmm6 + movl %edi,%esi + addl 44(%esp),%edx + xorl %ebx,%eax + shldl $5,%edi,%edi + vpxor %xmm2,%xmm6,%xmm6 + addl %ebp,%edx + andl %eax,%esi + vmovdqa 112(%esp),%xmm2 + xorl %ebx,%eax + addl %edi,%edx + shrdl $7,%edi,%edi + xorl %ebx,%esi + vpalignr $8,%xmm3,%xmm4,%xmm7 + movl %edx,%ebp + addl 48(%esp),%ecx + vpaddd %xmm6,%xmm2,%xmm2 + vmovdqa %xmm3,64(%esp) + xorl %eax,%edi + shldl $5,%edx,%edx + vpsrldq $4,%xmm6,%xmm1 + addl %esi,%ecx + andl %edi,%ebp + vpxor %xmm3,%xmm7,%xmm7 + xorl %eax,%edi + addl %edx,%ecx + vpxor %xmm5,%xmm1,%xmm1 + shrdl $7,%edx,%edx + xorl %eax,%ebp + vmovdqa %xmm2,32(%esp) + movl %ecx,%esi + addl 52(%esp),%ebx + vpxor %xmm1,%xmm7,%xmm7 + xorl %edi,%edx + shldl $5,%ecx,%ecx + addl %ebp,%ebx + andl %edx,%esi + vpsrld $31,%xmm7,%xmm1 + xorl %edi,%edx + addl %ecx,%ebx + shrdl $7,%ecx,%ecx + xorl %edi,%esi + vpslldq $12,%xmm7,%xmm3 + vpaddd %xmm7,%xmm7,%xmm7 + movl %ebx,%ebp + addl 56(%esp),%eax + xorl %edx,%ecx + shldl $5,%ebx,%ebx + vpsrld $30,%xmm3,%xmm2 + vpor %xmm1,%xmm7,%xmm7 + addl %esi,%eax + andl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + vpslld $2,%xmm3,%xmm3 + vmovdqa 80(%esp),%xmm1 + shrdl $7,%ebx,%ebx + xorl %edx,%ebp + vpxor %xmm2,%xmm7,%xmm7 + movl %eax,%esi + addl 60(%esp),%edi + xorl %ecx,%ebx + shldl $5,%eax,%eax + vpxor %xmm3,%xmm7,%xmm7 + addl %ebp,%edi + andl %ebx,%esi + vmovdqa 112(%esp),%xmm3 + xorl %ecx,%ebx + addl %eax,%edi + vpalignr $8,%xmm6,%xmm7,%xmm2 + vpxor %xmm4,%xmm0,%xmm0 + shrdl $7,%eax,%eax + xorl %ecx,%esi + movl %edi,%ebp + addl (%esp),%edx + 
vpxor %xmm1,%xmm0,%xmm0 + vmovdqa %xmm4,80(%esp) + xorl %ebx,%eax + shldl $5,%edi,%edi + vmovdqa %xmm3,%xmm4 + vpaddd %xmm7,%xmm3,%xmm3 + addl %esi,%edx + andl %eax,%ebp + vpxor %xmm2,%xmm0,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + shrdl $7,%edi,%edi + xorl %ebx,%ebp + vpsrld $30,%xmm0,%xmm2 + vmovdqa %xmm3,48(%esp) + movl %edx,%esi + addl 4(%esp),%ecx + xorl %eax,%edi + shldl $5,%edx,%edx + vpslld $2,%xmm0,%xmm0 + addl %ebp,%ecx + andl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + shrdl $7,%edx,%edx + xorl %eax,%esi + movl %ecx,%ebp + addl 8(%esp),%ebx + vpor %xmm2,%xmm0,%xmm0 + xorl %edi,%edx + shldl $5,%ecx,%ecx + vmovdqa 96(%esp),%xmm2 + addl %esi,%ebx + andl %edx,%ebp + xorl %edi,%edx + addl %ecx,%ebx + addl 12(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vpalignr $8,%xmm7,%xmm0,%xmm3 + vpxor %xmm5,%xmm1,%xmm1 + addl 16(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + vpxor %xmm2,%xmm1,%xmm1 + vmovdqa %xmm5,96(%esp) + addl %esi,%edi + xorl %ecx,%ebp + vmovdqa %xmm4,%xmm5 + vpaddd %xmm0,%xmm4,%xmm4 + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpxor %xmm3,%xmm1,%xmm1 + addl 20(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + vpsrld $30,%xmm1,%xmm3 + vmovdqa %xmm4,(%esp) + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + vpslld $2,%xmm1,%xmm1 + addl 24(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + vpor %xmm3,%xmm1,%xmm1 + addl 28(%esp),%ebx + xorl %edi,%ebp + vmovdqa 64(%esp),%xmm3 + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpalignr $8,%xmm0,%xmm1,%xmm4 + vpxor %xmm6,%xmm2,%xmm2 + addl 32(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + vpxor %xmm3,%xmm2,%xmm2 + vmovdqa %xmm6,64(%esp) + addl %esi,%eax + xorl %edx,%ebp + vmovdqa 128(%esp),%xmm6 + vpaddd %xmm1,%xmm5,%xmm5 + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vpxor %xmm4,%xmm2,%xmm2 + addl 36(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + vpsrld $30,%xmm2,%xmm4 + vmovdqa %xmm5,16(%esp) + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpslld $2,%xmm2,%xmm2 + addl 40(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + addl %esi,%edx + xorl %ebx,%ebp + shrdl $7,%eax,%eax + addl %edi,%edx + vpor %xmm4,%xmm2,%xmm2 + addl 44(%esp),%ecx + xorl %eax,%ebp + vmovdqa 80(%esp),%xmm4 + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + vpalignr $8,%xmm1,%xmm2,%xmm5 + vpxor %xmm7,%xmm3,%xmm3 + addl 48(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + vpxor %xmm4,%xmm3,%xmm3 + vmovdqa %xmm7,80(%esp) + addl %esi,%ebx + xorl %edi,%ebp + vmovdqa %xmm6,%xmm7 + vpaddd %xmm2,%xmm6,%xmm6 + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpxor %xmm5,%xmm3,%xmm3 + addl 52(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + vpsrld $30,%xmm3,%xmm5 + vmovdqa %xmm6,32(%esp) + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vpslld $2,%xmm3,%xmm3 + addl 56(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ecx,%ebp + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpor %xmm5,%xmm3,%xmm3 + addl 60(%esp),%edx + xorl %ebx,%ebp + vmovdqa 96(%esp),%xmm5 + movl %edi,%esi + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + 
addl %edi,%edx + vpalignr $8,%xmm2,%xmm3,%xmm6 + vpxor %xmm0,%xmm4,%xmm4 + addl (%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + vpxor %xmm5,%xmm4,%xmm4 + vmovdqa %xmm0,96(%esp) + addl %esi,%ecx + xorl %eax,%ebp + vmovdqa %xmm7,%xmm0 + vpaddd %xmm3,%xmm7,%xmm7 + shrdl $7,%edi,%edi + addl %edx,%ecx + vpxor %xmm6,%xmm4,%xmm4 + addl 4(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + vpsrld $30,%xmm4,%xmm6 + vmovdqa %xmm7,48(%esp) + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpslld $2,%xmm4,%xmm4 + addl 8(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vpor %xmm6,%xmm4,%xmm4 + addl 12(%esp),%edi + xorl %ecx,%ebp + vmovdqa 64(%esp),%xmm6 + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpalignr $8,%xmm3,%xmm4,%xmm7 + vpxor %xmm1,%xmm5,%xmm5 + addl 16(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + vpxor %xmm6,%xmm5,%xmm5 + vmovdqa %xmm1,64(%esp) + addl %esi,%edx + xorl %ebx,%ebp + vmovdqa %xmm0,%xmm1 + vpaddd %xmm4,%xmm0,%xmm0 + shrdl $7,%eax,%eax + addl %edi,%edx + vpxor %xmm7,%xmm5,%xmm5 + addl 20(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + vpsrld $30,%xmm5,%xmm7 + vmovdqa %xmm0,(%esp) + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + vpslld $2,%xmm5,%xmm5 + addl 24(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpor %xmm7,%xmm5,%xmm5 + addl 28(%esp),%eax + vmovdqa 80(%esp),%xmm7 + shrdl $7,%ecx,%ecx + movl %ebx,%esi + xorl %edx,%ebp + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %ecx,%esi + xorl %edx,%ecx + addl %ebx,%eax + vpalignr $8,%xmm4,%xmm5,%xmm0 + vpxor %xmm2,%xmm6,%xmm6 + addl 32(%esp),%edi + andl %ecx,%esi + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + vpxor %xmm7,%xmm6,%xmm6 + vmovdqa %xmm2,80(%esp) + movl %eax,%ebp + xorl %ecx,%esi + vmovdqa %xmm1,%xmm2 + vpaddd %xmm5,%xmm1,%xmm1 + shldl $5,%eax,%eax + addl %esi,%edi + vpxor %xmm0,%xmm6,%xmm6 + xorl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + addl 36(%esp),%edx + vpsrld $30,%xmm6,%xmm0 + vmovdqa %xmm1,16(%esp) + andl %ebx,%ebp + xorl %ecx,%ebx + shrdl $7,%eax,%eax + movl %edi,%esi + vpslld $2,%xmm6,%xmm6 + xorl %ebx,%ebp + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %eax,%esi + xorl %ebx,%eax + addl %edi,%edx + addl 40(%esp),%ecx + andl %eax,%esi + vpor %xmm0,%xmm6,%xmm6 + xorl %ebx,%eax + shrdl $7,%edi,%edi + vmovdqa 96(%esp),%xmm0 + movl %edx,%ebp + xorl %eax,%esi + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + addl 44(%esp),%ebx + andl %edi,%ebp + xorl %eax,%edi + shrdl $7,%edx,%edx + movl %ecx,%esi + xorl %edi,%ebp + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edx,%esi + xorl %edi,%edx + addl %ecx,%ebx + vpalignr $8,%xmm5,%xmm6,%xmm1 + vpxor %xmm3,%xmm7,%xmm7 + addl 48(%esp),%eax + andl %edx,%esi + xorl %edi,%edx + shrdl $7,%ecx,%ecx + vpxor %xmm0,%xmm7,%xmm7 + vmovdqa %xmm3,96(%esp) + movl %ebx,%ebp + xorl %edx,%esi + vmovdqa 144(%esp),%xmm3 + vpaddd %xmm6,%xmm2,%xmm2 + shldl $5,%ebx,%ebx + addl %esi,%eax + vpxor %xmm1,%xmm7,%xmm7 + xorl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + addl 52(%esp),%edi + vpsrld $30,%xmm7,%xmm1 + vmovdqa %xmm2,32(%esp) + andl %ecx,%ebp + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + movl %eax,%esi + vpslld $2,%xmm7,%xmm7 + xorl %ecx,%ebp + shldl $5,%eax,%eax + addl %ebp,%edi + xorl 
%ebx,%esi + xorl %ecx,%ebx + addl %eax,%edi + addl 56(%esp),%edx + andl %ebx,%esi + vpor %xmm1,%xmm7,%xmm7 + xorl %ecx,%ebx + shrdl $7,%eax,%eax + vmovdqa 64(%esp),%xmm1 + movl %edi,%ebp + xorl %ebx,%esi + shldl $5,%edi,%edi + addl %esi,%edx + xorl %eax,%ebp + xorl %ebx,%eax + addl %edi,%edx + addl 60(%esp),%ecx + andl %eax,%ebp + xorl %ebx,%eax + shrdl $7,%edi,%edi + movl %edx,%esi + xorl %eax,%ebp + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + vpalignr $8,%xmm6,%xmm7,%xmm2 + vpxor %xmm4,%xmm0,%xmm0 + addl (%esp),%ebx + andl %edi,%esi + xorl %eax,%edi + shrdl $7,%edx,%edx + vpxor %xmm1,%xmm0,%xmm0 + vmovdqa %xmm4,64(%esp) + movl %ecx,%ebp + xorl %edi,%esi + vmovdqa %xmm3,%xmm4 + vpaddd %xmm7,%xmm3,%xmm3 + shldl $5,%ecx,%ecx + addl %esi,%ebx + vpxor %xmm2,%xmm0,%xmm0 + xorl %edx,%ebp + xorl %edi,%edx + addl %ecx,%ebx + addl 4(%esp),%eax + vpsrld $30,%xmm0,%xmm2 + vmovdqa %xmm3,48(%esp) + andl %edx,%ebp + xorl %edi,%edx + shrdl $7,%ecx,%ecx + movl %ebx,%esi + vpslld $2,%xmm0,%xmm0 + xorl %edx,%ebp + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %ecx,%esi + xorl %edx,%ecx + addl %ebx,%eax + addl 8(%esp),%edi + andl %ecx,%esi + vpor %xmm2,%xmm0,%xmm0 + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + vmovdqa 80(%esp),%xmm2 + movl %eax,%ebp + xorl %ecx,%esi + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + addl 12(%esp),%edx + andl %ebx,%ebp + xorl %ecx,%ebx + shrdl $7,%eax,%eax + movl %edi,%esi + xorl %ebx,%ebp + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %eax,%esi + xorl %ebx,%eax + addl %edi,%edx + vpalignr $8,%xmm7,%xmm0,%xmm3 + vpxor %xmm5,%xmm1,%xmm1 + addl 16(%esp),%ecx + andl %eax,%esi + xorl %ebx,%eax + shrdl $7,%edi,%edi + vpxor %xmm2,%xmm1,%xmm1 + vmovdqa %xmm5,80(%esp) + movl %edx,%ebp + xorl %eax,%esi + vmovdqa %xmm4,%xmm5 + vpaddd %xmm0,%xmm4,%xmm4 + shldl $5,%edx,%edx + addl %esi,%ecx + vpxor %xmm3,%xmm1,%xmm1 + xorl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + addl 20(%esp),%ebx + vpsrld $30,%xmm1,%xmm3 + vmovdqa %xmm4,(%esp) + andl %edi,%ebp + xorl %eax,%edi + shrdl $7,%edx,%edx + movl %ecx,%esi + vpslld $2,%xmm1,%xmm1 + xorl %edi,%ebp + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edx,%esi + xorl %edi,%edx + addl %ecx,%ebx + addl 24(%esp),%eax + andl %edx,%esi + vpor %xmm3,%xmm1,%xmm1 + xorl %edi,%edx + shrdl $7,%ecx,%ecx + vmovdqa 96(%esp),%xmm3 + movl %ebx,%ebp + xorl %edx,%esi + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + addl 28(%esp),%edi + andl %ecx,%ebp + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + movl %eax,%esi + xorl %ecx,%ebp + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ebx,%esi + xorl %ecx,%ebx + addl %eax,%edi + vpalignr $8,%xmm0,%xmm1,%xmm4 + vpxor %xmm6,%xmm2,%xmm2 + addl 32(%esp),%edx + andl %ebx,%esi + xorl %ecx,%ebx + shrdl $7,%eax,%eax + vpxor %xmm3,%xmm2,%xmm2 + vmovdqa %xmm6,96(%esp) + movl %edi,%ebp + xorl %ebx,%esi + vmovdqa %xmm5,%xmm6 + vpaddd %xmm1,%xmm5,%xmm5 + shldl $5,%edi,%edi + addl %esi,%edx + vpxor %xmm4,%xmm2,%xmm2 + xorl %eax,%ebp + xorl %ebx,%eax + addl %edi,%edx + addl 36(%esp),%ecx + vpsrld $30,%xmm2,%xmm4 + vmovdqa %xmm5,16(%esp) + andl %eax,%ebp + xorl %ebx,%eax + shrdl $7,%edi,%edi + movl %edx,%esi + vpslld $2,%xmm2,%xmm2 + xorl %eax,%ebp + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + addl 40(%esp),%ebx + andl %edi,%esi + vpor %xmm4,%xmm2,%xmm2 + xorl %eax,%edi + shrdl $7,%edx,%edx + vmovdqa 64(%esp),%xmm4 + movl %ecx,%ebp + xorl %edi,%esi + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl 
%edx,%ebp + xorl %edi,%edx + addl %ecx,%ebx + addl 44(%esp),%eax + andl %edx,%ebp + xorl %edi,%edx + shrdl $7,%ecx,%ecx + movl %ebx,%esi + xorl %edx,%ebp + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + addl %ebx,%eax + vpalignr $8,%xmm1,%xmm2,%xmm5 + vpxor %xmm7,%xmm3,%xmm3 + addl 48(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + vpxor %xmm4,%xmm3,%xmm3 + vmovdqa %xmm7,64(%esp) + addl %esi,%edi + xorl %ecx,%ebp + vmovdqa %xmm6,%xmm7 + vpaddd %xmm2,%xmm6,%xmm6 + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpxor %xmm5,%xmm3,%xmm3 + addl 52(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + vpsrld $30,%xmm3,%xmm5 + vmovdqa %xmm6,32(%esp) + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + vpslld $2,%xmm3,%xmm3 + addl 56(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + vpor %xmm5,%xmm3,%xmm3 + addl 60(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl (%esp),%eax + vpaddd %xmm3,%xmm7,%xmm7 + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + vmovdqa %xmm7,48(%esp) + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 4(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 8(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + addl %esi,%edx + xorl %ebx,%ebp + shrdl $7,%eax,%eax + addl %edi,%edx + addl 12(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + movl 196(%esp),%ebp + cmpl 200(%esp),%ebp + je L010done + vmovdqa 160(%esp),%xmm7 + vmovdqa 176(%esp),%xmm6 + vmovdqu (%ebp),%xmm0 + vmovdqu 16(%ebp),%xmm1 + vmovdqu 32(%ebp),%xmm2 + vmovdqu 48(%ebp),%xmm3 + addl $64,%ebp + vpshufb %xmm6,%xmm0,%xmm0 + movl %ebp,196(%esp) + vmovdqa %xmm7,96(%esp) + addl 16(%esp),%ebx + xorl %edi,%esi + vpshufb %xmm6,%xmm1,%xmm1 + movl %ecx,%ebp + shldl $5,%ecx,%ecx + vpaddd %xmm7,%xmm0,%xmm4 + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + vmovdqa %xmm4,(%esp) + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ecx,%ebp + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + vpshufb %xmm6,%xmm2,%xmm2 + movl %edx,%ebp + shldl $5,%edx,%edx + vpaddd %xmm7,%xmm1,%xmm5 + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + vmovdqa %xmm5,16(%esp) + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + vpshufb %xmm6,%xmm3,%xmm3 + movl %edi,%ebp + shldl $5,%edi,%edi + vpaddd %xmm7,%xmm2,%xmm6 + addl %esi,%edx + xorl %ebx,%ebp + shrdl 
$7,%eax,%eax + addl %edi,%edx + vmovdqa %xmm6,32(%esp) + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + shrdl $7,%ecx,%ecx + addl %ebx,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,%ebx + movl %ecx,8(%ebp) + xorl %edx,%ebx + movl %edx,12(%ebp) + movl %edi,16(%ebp) + movl %esi,%ebp + andl %ebx,%esi + movl %ebp,%ebx + jmp L009loop +.align 4,0x90 +L010done: + addl 16(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ecx,%ebp + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + addl %esi,%edx + xorl %ebx,%ebp + shrdl $7,%eax,%eax + addl %edi,%edx + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vzeroall + movl 192(%esp),%ebp + addl (%ebp),%eax + movl 204(%esp),%esp + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %edx,12(%ebp) + movl %edi,16(%ebp) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 6,0x90 +LK_XX_XX: +.long 1518500249,1518500249,1518500249,1518500249 +.long 1859775393,1859775393,1859775393,1859775393 +.long 2400959708,2400959708,2400959708,2400959708 +.long 3395469782,3395469782,3395469782,3395469782 +.long 66051,67438087,134810123,202182159 +.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 +.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 +.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 +.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 +.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 +.comm _OPENSSL_ia32cap_P,16 diff --git 
a/deps/openssl/config/archs/BSD-x86/asm_avx2/configdata.pm b/deps/openssl/config/archs/BSD-x86/asm_avx2/configdata.pm index 08151c2b5bbcad..3f54d762a164ed 100644 --- a/deps/openssl/config/archs/BSD-x86/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/configdata.pm @@ -62,7 +62,7 @@ our %config = ( options => "enable-ssl-trace no-afalgeng no-asan no-buildtest-c++ no-comp no-crypto-mdebug no-crypto-mdebug-backtrace no-dynamic-engine no-ec_nistp_64_gcc_128 no-egd no-external-tests no-fuzz-afl no-fuzz-libfuzzer no-heartbeats no-md2 no-msan no-rc5 no-sctp no-shared no-ssl3 no-ssl3-method no-ubsan no-unit-test no-weak-ssl-ciphers no-zlib no-zlib-dynamic", perl_archname => "x86_64-linux-gnu-thread-multi", perl_cmd => "/usr/bin/perl", - perl_version => "5.28.1", + perl_version => "5.26.1", perlargv => [ "no-comp", "no-shared", "no-afalgeng", "enable-ssl-trace", "BSD-x86" ], perlenv => { "AR" => undef, @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "BSD-x86", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -15447,3 +15447,4 @@ Verbose output. =back =cut + diff --git a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/bn/bn-586.s b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/bn/bn-586.s new file mode 100644 index 00000000000000..c7c0a81c38b98a --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/bn/bn-586.s @@ -0,0 +1,1525 @@ +.text +.globl _bn_mul_add_words +.type _bn_mul_add_words,@function +.align 4 +_bn_mul_add_words: +L_bn_mul_add_words_begin: + call L000PIC_me_up +L000PIC_me_up: + popl %eax + leal __GLOBAL_OFFSET_TABLE_+[.-L000PIC_me_up](%eax),%eax + movl _OPENSSL_ia32cap_P@GOT(%eax),%eax + btl $26,(%eax) + jnc L001maw_non_sse2 + movl 4(%esp),%eax + movl 8(%esp),%edx + movl 12(%esp),%ecx + movd 16(%esp),%mm0 + pxor %mm1,%mm1 + jmp L002maw_sse2_entry +.align 4,0x90 +L003maw_sse2_unrolled: + movd (%eax),%mm3 + paddq %mm3,%mm1 + movd (%edx),%mm2 + pmuludq %mm0,%mm2 + movd 4(%edx),%mm4 + pmuludq %mm0,%mm4 + movd 8(%edx),%mm6 + pmuludq %mm0,%mm6 + movd 12(%edx),%mm7 + pmuludq %mm0,%mm7 + paddq %mm2,%mm1 + movd 4(%eax),%mm3 + paddq %mm4,%mm3 + movd 8(%eax),%mm5 + paddq %mm6,%mm5 + movd 12(%eax),%mm4 + paddq %mm4,%mm7 + movd %mm1,(%eax) + movd 16(%edx),%mm2 + pmuludq %mm0,%mm2 + psrlq $32,%mm1 + movd 20(%edx),%mm4 + pmuludq %mm0,%mm4 + paddq %mm3,%mm1 + movd 24(%edx),%mm6 + pmuludq %mm0,%mm6 + movd %mm1,4(%eax) + psrlq $32,%mm1 + movd 28(%edx),%mm3 + addl $32,%edx + pmuludq %mm0,%mm3 + paddq %mm5,%mm1 + movd 16(%eax),%mm5 + paddq %mm5,%mm2 + movd %mm1,8(%eax) + psrlq $32,%mm1 + paddq %mm7,%mm1 + movd 20(%eax),%mm5 + paddq %mm5,%mm4 + movd %mm1,12(%eax) + psrlq $32,%mm1 + paddq %mm2,%mm1 + movd 24(%eax),%mm5 + paddq %mm5,%mm6 + movd %mm1,16(%eax) + psrlq $32,%mm1 + paddq %mm4,%mm1 + movd 28(%eax),%mm5 + paddq %mm5,%mm3 + movd %mm1,20(%eax) + psrlq $32,%mm1 + paddq %mm6,%mm1 + movd %mm1,24(%eax) + psrlq $32,%mm1 + paddq %mm3,%mm1 + movd %mm1,28(%eax) + leal 32(%eax),%eax + psrlq $32,%mm1 + subl $8,%ecx + jz L004maw_sse2_exit +L002maw_sse2_entry: + testl $4294967288,%ecx + jnz L003maw_sse2_unrolled +.align 2,0x90 +L005maw_sse2_loop: + movd (%edx),%mm2 + movd (%eax),%mm3 + pmuludq %mm0,%mm2 + leal 4(%edx),%edx + paddq %mm3,%mm1 + paddq %mm2,%mm1 + movd %mm1,(%eax) + subl $1,%ecx + psrlq $32,%mm1 + leal 4(%eax),%eax + jnz L005maw_sse2_loop +L004maw_sse2_exit: + movd %mm1,%eax + emms + ret +.align 4,0x90 +L001maw_non_sse2: + pushl %ebp 
+ pushl %ebx + pushl %esi + pushl %edi + + xorl %esi,%esi + movl 20(%esp),%edi + movl 28(%esp),%ecx + movl 24(%esp),%ebx + andl $4294967288,%ecx + movl 32(%esp),%ebp + pushl %ecx + jz L006maw_finish +.align 4,0x90 +L007maw_loop: + # Round 0 + movl (%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl (%edi),%eax + adcl $0,%edx + movl %eax,(%edi) + movl %edx,%esi + # Round 4 + movl 4(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 4(%edi),%eax + adcl $0,%edx + movl %eax,4(%edi) + movl %edx,%esi + # Round 8 + movl 8(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 8(%edi),%eax + adcl $0,%edx + movl %eax,8(%edi) + movl %edx,%esi + # Round 12 + movl 12(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 12(%edi),%eax + adcl $0,%edx + movl %eax,12(%edi) + movl %edx,%esi + # Round 16 + movl 16(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 16(%edi),%eax + adcl $0,%edx + movl %eax,16(%edi) + movl %edx,%esi + # Round 20 + movl 20(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 20(%edi),%eax + adcl $0,%edx + movl %eax,20(%edi) + movl %edx,%esi + # Round 24 + movl 24(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 24(%edi),%eax + adcl $0,%edx + movl %eax,24(%edi) + movl %edx,%esi + # Round 28 + movl 28(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 28(%edi),%eax + adcl $0,%edx + movl %eax,28(%edi) + movl %edx,%esi + + subl $8,%ecx + leal 32(%ebx),%ebx + leal 32(%edi),%edi + jnz L007maw_loop +L006maw_finish: + movl 32(%esp),%ecx + andl $7,%ecx + jnz L008maw_finish2 + jmp L009maw_end +L008maw_finish2: + # Tail Round 0 + movl (%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl (%edi),%eax + adcl $0,%edx + decl %ecx + movl %eax,(%edi) + movl %edx,%esi + jz L009maw_end + # Tail Round 1 + movl 4(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 4(%edi),%eax + adcl $0,%edx + decl %ecx + movl %eax,4(%edi) + movl %edx,%esi + jz L009maw_end + # Tail Round 2 + movl 8(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 8(%edi),%eax + adcl $0,%edx + decl %ecx + movl %eax,8(%edi) + movl %edx,%esi + jz L009maw_end + # Tail Round 3 + movl 12(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 12(%edi),%eax + adcl $0,%edx + decl %ecx + movl %eax,12(%edi) + movl %edx,%esi + jz L009maw_end + # Tail Round 4 + movl 16(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 16(%edi),%eax + adcl $0,%edx + decl %ecx + movl %eax,16(%edi) + movl %edx,%esi + jz L009maw_end + # Tail Round 5 + movl 20(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 20(%edi),%eax + adcl $0,%edx + decl %ecx + movl %eax,20(%edi) + movl %edx,%esi + jz L009maw_end + # Tail Round 6 + movl 24(%ebx),%eax + mull %ebp + addl %esi,%eax + adcl $0,%edx + addl 24(%edi),%eax + adcl $0,%edx + movl %eax,24(%edi) + movl %edx,%esi +L009maw_end: + movl %esi,%eax + popl %ecx + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.globl _bn_mul_words +.type _bn_mul_words,@function +.align 4 +_bn_mul_words: +L_bn_mul_words_begin: + call L010PIC_me_up +L010PIC_me_up: + popl %eax + leal __GLOBAL_OFFSET_TABLE_+[.-L010PIC_me_up](%eax),%eax + movl _OPENSSL_ia32cap_P@GOT(%eax),%eax + btl $26,(%eax) + jnc L011mw_non_sse2 + movl 4(%esp),%eax + movl 8(%esp),%edx + movl 12(%esp),%ecx + movd 16(%esp),%mm0 + pxor %mm1,%mm1 +.align 4,0x90 +L012mw_sse2_loop: + movd (%edx),%mm2 + pmuludq %mm0,%mm2 + leal 4(%edx),%edx + paddq %mm2,%mm1 + movd %mm1,(%eax) + subl $1,%ecx + psrlq $32,%mm1 + leal 4(%eax),%eax + jnz 
L012mw_sse2_loop + movd %mm1,%eax + emms + ret +.align 4,0x90 +L011mw_non_sse2: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + + xorl %esi,%esi + movl 20(%esp),%edi + movl 24(%esp),%ebx + movl 28(%esp),%ebp + movl 32(%esp),%ecx + andl $4294967288,%ebp + jz L013mw_finish +L014mw_loop: + # Round 0 + movl (%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,(%edi) + movl %edx,%esi + # Round 4 + movl 4(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,4(%edi) + movl %edx,%esi + # Round 8 + movl 8(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,8(%edi) + movl %edx,%esi + # Round 12 + movl 12(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,12(%edi) + movl %edx,%esi + # Round 16 + movl 16(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,16(%edi) + movl %edx,%esi + # Round 20 + movl 20(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,20(%edi) + movl %edx,%esi + # Round 24 + movl 24(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,24(%edi) + movl %edx,%esi + # Round 28 + movl 28(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,28(%edi) + movl %edx,%esi + + addl $32,%ebx + addl $32,%edi + subl $8,%ebp + jz L013mw_finish + jmp L014mw_loop +L013mw_finish: + movl 28(%esp),%ebp + andl $7,%ebp + jnz L015mw_finish2 + jmp L016mw_end +L015mw_finish2: + # Tail Round 0 + movl (%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,(%edi) + movl %edx,%esi + decl %ebp + jz L016mw_end + # Tail Round 1 + movl 4(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,4(%edi) + movl %edx,%esi + decl %ebp + jz L016mw_end + # Tail Round 2 + movl 8(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,8(%edi) + movl %edx,%esi + decl %ebp + jz L016mw_end + # Tail Round 3 + movl 12(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,12(%edi) + movl %edx,%esi + decl %ebp + jz L016mw_end + # Tail Round 4 + movl 16(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,16(%edi) + movl %edx,%esi + decl %ebp + jz L016mw_end + # Tail Round 5 + movl 20(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,20(%edi) + movl %edx,%esi + decl %ebp + jz L016mw_end + # Tail Round 6 + movl 24(%ebx),%eax + mull %ecx + addl %esi,%eax + adcl $0,%edx + movl %eax,24(%edi) + movl %edx,%esi +L016mw_end: + movl %esi,%eax + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.globl _bn_sqr_words +.type _bn_sqr_words,@function +.align 4 +_bn_sqr_words: +L_bn_sqr_words_begin: + call L017PIC_me_up +L017PIC_me_up: + popl %eax + leal __GLOBAL_OFFSET_TABLE_+[.-L017PIC_me_up](%eax),%eax + movl _OPENSSL_ia32cap_P@GOT(%eax),%eax + btl $26,(%eax) + jnc L018sqr_non_sse2 + movl 4(%esp),%eax + movl 8(%esp),%edx + movl 12(%esp),%ecx +.align 4,0x90 +L019sqr_sse2_loop: + movd (%edx),%mm0 + pmuludq %mm0,%mm0 + leal 4(%edx),%edx + movq %mm0,(%eax) + subl $1,%ecx + leal 8(%eax),%eax + jnz L019sqr_sse2_loop + emms + ret +.align 4,0x90 +L018sqr_non_sse2: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + + movl 20(%esp),%esi + movl 24(%esp),%edi + movl 28(%esp),%ebx + andl $4294967288,%ebx + jz L020sw_finish +L021sw_loop: + # Round 0 + movl (%edi),%eax + mull %eax + movl %eax,(%esi) + movl %edx,4(%esi) + # Round 4 + movl 4(%edi),%eax + mull %eax + movl %eax,8(%esi) + movl %edx,12(%esi) + # Round 8 + movl 8(%edi),%eax + mull %eax + movl %eax,16(%esi) + movl %edx,20(%esi) + # Round 12 + movl 12(%edi),%eax + mull %eax + movl %eax,24(%esi) + 
movl %edx,28(%esi) + # Round 16 + movl 16(%edi),%eax + mull %eax + movl %eax,32(%esi) + movl %edx,36(%esi) + # Round 20 + movl 20(%edi),%eax + mull %eax + movl %eax,40(%esi) + movl %edx,44(%esi) + # Round 24 + movl 24(%edi),%eax + mull %eax + movl %eax,48(%esi) + movl %edx,52(%esi) + # Round 28 + movl 28(%edi),%eax + mull %eax + movl %eax,56(%esi) + movl %edx,60(%esi) + + addl $32,%edi + addl $64,%esi + subl $8,%ebx + jnz L021sw_loop +L020sw_finish: + movl 28(%esp),%ebx + andl $7,%ebx + jz L022sw_end + # Tail Round 0 + movl (%edi),%eax + mull %eax + movl %eax,(%esi) + decl %ebx + movl %edx,4(%esi) + jz L022sw_end + # Tail Round 1 + movl 4(%edi),%eax + mull %eax + movl %eax,8(%esi) + decl %ebx + movl %edx,12(%esi) + jz L022sw_end + # Tail Round 2 + movl 8(%edi),%eax + mull %eax + movl %eax,16(%esi) + decl %ebx + movl %edx,20(%esi) + jz L022sw_end + # Tail Round 3 + movl 12(%edi),%eax + mull %eax + movl %eax,24(%esi) + decl %ebx + movl %edx,28(%esi) + jz L022sw_end + # Tail Round 4 + movl 16(%edi),%eax + mull %eax + movl %eax,32(%esi) + decl %ebx + movl %edx,36(%esi) + jz L022sw_end + # Tail Round 5 + movl 20(%edi),%eax + mull %eax + movl %eax,40(%esi) + decl %ebx + movl %edx,44(%esi) + jz L022sw_end + # Tail Round 6 + movl 24(%edi),%eax + mull %eax + movl %eax,48(%esi) + movl %edx,52(%esi) +L022sw_end: + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.globl _bn_div_words +.type _bn_div_words,@function +.align 4 +_bn_div_words: +L_bn_div_words_begin: + movl 4(%esp),%edx + movl 8(%esp),%eax + movl 12(%esp),%ecx + divl %ecx + ret +.globl _bn_add_words +.type _bn_add_words,@function +.align 4 +_bn_add_words: +L_bn_add_words_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + + movl 20(%esp),%ebx + movl 24(%esp),%esi + movl 28(%esp),%edi + movl 32(%esp),%ebp + xorl %eax,%eax + andl $4294967288,%ebp + jz L023aw_finish +L024aw_loop: + # Round 0 + movl (%esi),%ecx + movl (%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + # Round 1 + movl 4(%esi),%ecx + movl 4(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,4(%ebx) + # Round 2 + movl 8(%esi),%ecx + movl 8(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,8(%ebx) + # Round 3 + movl 12(%esi),%ecx + movl 12(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,12(%ebx) + # Round 4 + movl 16(%esi),%ecx + movl 16(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,16(%ebx) + # Round 5 + movl 20(%esi),%ecx + movl 20(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,20(%ebx) + # Round 6 + movl 24(%esi),%ecx + movl 24(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,24(%ebx) + # Round 7 + movl 28(%esi),%ecx + movl 28(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,28(%ebx) + + addl $32,%esi + addl $32,%edi + addl $32,%ebx + subl $8,%ebp + jnz L024aw_loop +L023aw_finish: + movl 32(%esp),%ebp + andl $7,%ebp + jz L025aw_end + # Tail Round 0 + movl (%esi),%ecx + movl (%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,(%ebx) + jz L025aw_end + # Tail Round 1 + movl 4(%esi),%ecx + movl 4(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl 
%edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,4(%ebx) + jz L025aw_end + # Tail Round 2 + movl 8(%esi),%ecx + movl 8(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,8(%ebx) + jz L025aw_end + # Tail Round 3 + movl 12(%esi),%ecx + movl 12(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,12(%ebx) + jz L025aw_end + # Tail Round 4 + movl 16(%esi),%ecx + movl 16(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,16(%ebx) + jz L025aw_end + # Tail Round 5 + movl 20(%esi),%ecx + movl 20(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,20(%ebx) + jz L025aw_end + # Tail Round 6 + movl 24(%esi),%ecx + movl 24(%edi),%edx + addl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + addl %edx,%ecx + adcl $0,%eax + movl %ecx,24(%ebx) +L025aw_end: + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.globl _bn_sub_words +.type _bn_sub_words,@function +.align 4 +_bn_sub_words: +L_bn_sub_words_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + + movl 20(%esp),%ebx + movl 24(%esp),%esi + movl 28(%esp),%edi + movl 32(%esp),%ebp + xorl %eax,%eax + andl $4294967288,%ebp + jz L026aw_finish +L027aw_loop: + # Round 0 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + # Round 1 + movl 4(%esi),%ecx + movl 4(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,4(%ebx) + # Round 2 + movl 8(%esi),%ecx + movl 8(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,8(%ebx) + # Round 3 + movl 12(%esi),%ecx + movl 12(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,12(%ebx) + # Round 4 + movl 16(%esi),%ecx + movl 16(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,16(%ebx) + # Round 5 + movl 20(%esi),%ecx + movl 20(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,20(%ebx) + # Round 6 + movl 24(%esi),%ecx + movl 24(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,24(%ebx) + # Round 7 + movl 28(%esi),%ecx + movl 28(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,28(%ebx) + + addl $32,%esi + addl $32,%edi + addl $32,%ebx + subl $8,%ebp + jnz L027aw_loop +L026aw_finish: + movl 32(%esp),%ebp + andl $7,%ebp + jz L028aw_end + # Tail Round 0 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,(%ebx) + jz L028aw_end + # Tail Round 1 + movl 4(%esi),%ecx + movl 4(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,4(%ebx) + jz L028aw_end + # Tail Round 2 + movl 8(%esi),%ecx + movl 8(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,8(%ebx) + jz L028aw_end + # Tail Round 3 + movl 12(%esi),%ecx + movl 12(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,12(%ebx) + jz L028aw_end + # Tail Round 4 + movl 16(%esi),%ecx + movl 
16(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,16(%ebx) + jz L028aw_end + # Tail Round 5 + movl 20(%esi),%ecx + movl 20(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,20(%ebx) + jz L028aw_end + # Tail Round 6 + movl 24(%esi),%ecx + movl 24(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,24(%ebx) +L028aw_end: + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.globl _bn_sub_part_words +.type _bn_sub_part_words,@function +.align 4 +_bn_sub_part_words: +L_bn_sub_part_words_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + + movl 20(%esp),%ebx + movl 24(%esp),%esi + movl 28(%esp),%edi + movl 32(%esp),%ebp + xorl %eax,%eax + andl $4294967288,%ebp + jz L029aw_finish +L030aw_loop: + # Round 0 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + # Round 1 + movl 4(%esi),%ecx + movl 4(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,4(%ebx) + # Round 2 + movl 8(%esi),%ecx + movl 8(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,8(%ebx) + # Round 3 + movl 12(%esi),%ecx + movl 12(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,12(%ebx) + # Round 4 + movl 16(%esi),%ecx + movl 16(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,16(%ebx) + # Round 5 + movl 20(%esi),%ecx + movl 20(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,20(%ebx) + # Round 6 + movl 24(%esi),%ecx + movl 24(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,24(%ebx) + # Round 7 + movl 28(%esi),%ecx + movl 28(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,28(%ebx) + + addl $32,%esi + addl $32,%edi + addl $32,%ebx + subl $8,%ebp + jnz L030aw_loop +L029aw_finish: + movl 32(%esp),%ebp + andl $7,%ebp + jz L031aw_end + # Tail Round 0 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + addl $4,%esi + addl $4,%edi + addl $4,%ebx + decl %ebp + jz L031aw_end + # Tail Round 1 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + addl $4,%esi + addl $4,%edi + addl $4,%ebx + decl %ebp + jz L031aw_end + # Tail Round 2 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + addl $4,%esi + addl $4,%edi + addl $4,%ebx + decl %ebp + jz L031aw_end + # Tail Round 3 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + addl $4,%esi + addl $4,%edi + addl $4,%ebx + decl %ebp + jz L031aw_end + # Tail Round 4 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + addl $4,%esi + addl $4,%edi + addl $4,%ebx + decl %ebp + jz L031aw_end + # Tail Round 5 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl 
$0,%eax + movl %ecx,(%ebx) + addl $4,%esi + addl $4,%edi + addl $4,%ebx + decl %ebp + jz L031aw_end + # Tail Round 6 + movl (%esi),%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + addl $4,%esi + addl $4,%edi + addl $4,%ebx +L031aw_end: + cmpl $0,36(%esp) + je L032pw_end + movl 36(%esp),%ebp + cmpl $0,%ebp + je L032pw_end + jge L033pw_pos + # pw_neg + movl $0,%edx + subl %ebp,%edx + movl %edx,%ebp + andl $4294967288,%ebp + jz L034pw_neg_finish +L035pw_neg_loop: + # dl<0 Round 0 + movl $0,%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,(%ebx) + # dl<0 Round 1 + movl $0,%ecx + movl 4(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,4(%ebx) + # dl<0 Round 2 + movl $0,%ecx + movl 8(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,8(%ebx) + # dl<0 Round 3 + movl $0,%ecx + movl 12(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,12(%ebx) + # dl<0 Round 4 + movl $0,%ecx + movl 16(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,16(%ebx) + # dl<0 Round 5 + movl $0,%ecx + movl 20(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,20(%ebx) + # dl<0 Round 6 + movl $0,%ecx + movl 24(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,24(%ebx) + # dl<0 Round 7 + movl $0,%ecx + movl 28(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,28(%ebx) + + addl $32,%edi + addl $32,%ebx + subl $8,%ebp + jnz L035pw_neg_loop +L034pw_neg_finish: + movl 36(%esp),%edx + movl $0,%ebp + subl %edx,%ebp + andl $7,%ebp + jz L032pw_end + # dl<0 Tail Round 0 + movl $0,%ecx + movl (%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,(%ebx) + jz L032pw_end + # dl<0 Tail Round 1 + movl $0,%ecx + movl 4(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,4(%ebx) + jz L032pw_end + # dl<0 Tail Round 2 + movl $0,%ecx + movl 8(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,8(%ebx) + jz L032pw_end + # dl<0 Tail Round 3 + movl $0,%ecx + movl 12(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,12(%ebx) + jz L032pw_end + # dl<0 Tail Round 4 + movl $0,%ecx + movl 16(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,16(%ebx) + jz L032pw_end + # dl<0 Tail Round 5 + movl $0,%ecx + movl 20(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + decl %ebp + movl %ecx,20(%ebx) + jz L032pw_end + # dl<0 Tail Round 6 + movl $0,%ecx + movl 24(%edi),%edx + subl %eax,%ecx + movl $0,%eax + adcl %eax,%eax + subl %edx,%ecx + adcl $0,%eax + movl %ecx,24(%ebx) + jmp L032pw_end +L033pw_pos: + andl $4294967288,%ebp + jz L036pw_pos_finish +L037pw_pos_loop: + # dl>0 Round 0 + movl (%esi),%ecx + subl %eax,%ecx + movl %ecx,(%ebx) + jnc L038pw_nc0 + # dl>0 Round 1 + movl 4(%esi),%ecx + subl %eax,%ecx + movl %ecx,4(%ebx) + jnc L039pw_nc1 + # dl>0 Round 2 + movl 8(%esi),%ecx + subl 
%eax,%ecx + movl %ecx,8(%ebx) + jnc L040pw_nc2 + # dl>0 Round 3 + movl 12(%esi),%ecx + subl %eax,%ecx + movl %ecx,12(%ebx) + jnc L041pw_nc3 + # dl>0 Round 4 + movl 16(%esi),%ecx + subl %eax,%ecx + movl %ecx,16(%ebx) + jnc L042pw_nc4 + # dl>0 Round 5 + movl 20(%esi),%ecx + subl %eax,%ecx + movl %ecx,20(%ebx) + jnc L043pw_nc5 + # dl>0 Round 6 + movl 24(%esi),%ecx + subl %eax,%ecx + movl %ecx,24(%ebx) + jnc L044pw_nc6 + # dl>0 Round 7 + movl 28(%esi),%ecx + subl %eax,%ecx + movl %ecx,28(%ebx) + jnc L045pw_nc7 + + addl $32,%esi + addl $32,%ebx + subl $8,%ebp + jnz L037pw_pos_loop +L036pw_pos_finish: + movl 36(%esp),%ebp + andl $7,%ebp + jz L032pw_end + # dl>0 Tail Round 0 + movl (%esi),%ecx + subl %eax,%ecx + movl %ecx,(%ebx) + jnc L046pw_tail_nc0 + decl %ebp + jz L032pw_end + # dl>0 Tail Round 1 + movl 4(%esi),%ecx + subl %eax,%ecx + movl %ecx,4(%ebx) + jnc L047pw_tail_nc1 + decl %ebp + jz L032pw_end + # dl>0 Tail Round 2 + movl 8(%esi),%ecx + subl %eax,%ecx + movl %ecx,8(%ebx) + jnc L048pw_tail_nc2 + decl %ebp + jz L032pw_end + # dl>0 Tail Round 3 + movl 12(%esi),%ecx + subl %eax,%ecx + movl %ecx,12(%ebx) + jnc L049pw_tail_nc3 + decl %ebp + jz L032pw_end + # dl>0 Tail Round 4 + movl 16(%esi),%ecx + subl %eax,%ecx + movl %ecx,16(%ebx) + jnc L050pw_tail_nc4 + decl %ebp + jz L032pw_end + # dl>0 Tail Round 5 + movl 20(%esi),%ecx + subl %eax,%ecx + movl %ecx,20(%ebx) + jnc L051pw_tail_nc5 + decl %ebp + jz L032pw_end + # dl>0 Tail Round 6 + movl 24(%esi),%ecx + subl %eax,%ecx + movl %ecx,24(%ebx) + jnc L052pw_tail_nc6 + movl $1,%eax + jmp L032pw_end +L053pw_nc_loop: + movl (%esi),%ecx + movl %ecx,(%ebx) +L038pw_nc0: + movl 4(%esi),%ecx + movl %ecx,4(%ebx) +L039pw_nc1: + movl 8(%esi),%ecx + movl %ecx,8(%ebx) +L040pw_nc2: + movl 12(%esi),%ecx + movl %ecx,12(%ebx) +L041pw_nc3: + movl 16(%esi),%ecx + movl %ecx,16(%ebx) +L042pw_nc4: + movl 20(%esi),%ecx + movl %ecx,20(%ebx) +L043pw_nc5: + movl 24(%esi),%ecx + movl %ecx,24(%ebx) +L044pw_nc6: + movl 28(%esi),%ecx + movl %ecx,28(%ebx) +L045pw_nc7: + + addl $32,%esi + addl $32,%ebx + subl $8,%ebp + jnz L053pw_nc_loop + movl 36(%esp),%ebp + andl $7,%ebp + jz L054pw_nc_end + movl (%esi),%ecx + movl %ecx,(%ebx) +L046pw_tail_nc0: + decl %ebp + jz L054pw_nc_end + movl 4(%esi),%ecx + movl %ecx,4(%ebx) +L047pw_tail_nc1: + decl %ebp + jz L054pw_nc_end + movl 8(%esi),%ecx + movl %ecx,8(%ebx) +L048pw_tail_nc2: + decl %ebp + jz L054pw_nc_end + movl 12(%esi),%ecx + movl %ecx,12(%ebx) +L049pw_tail_nc3: + decl %ebp + jz L054pw_nc_end + movl 16(%esi),%ecx + movl %ecx,16(%ebx) +L050pw_tail_nc4: + decl %ebp + jz L054pw_nc_end + movl 20(%esi),%ecx + movl %ecx,20(%ebx) +L051pw_tail_nc5: + decl %ebp + jz L054pw_nc_end + movl 24(%esi),%ecx + movl %ecx,24(%ebx) +L052pw_tail_nc6: +L054pw_nc_end: + movl $0,%eax +L032pw_end: + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.comm _OPENSSL_ia32cap_P,16 diff --git a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/bn/co-586.s b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/bn/co-586.s new file mode 100644 index 00000000000000..d2608b28564f5a --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/bn/co-586.s @@ -0,0 +1,1249 @@ +.text +.globl _bn_mul_comba8 +.type _bn_mul_comba8,@function +.align 4 +_bn_mul_comba8: +L_bn_mul_comba8_begin: + pushl %esi + movl 12(%esp),%esi + pushl %edi + movl 20(%esp),%edi + pushl %ebp + pushl %ebx + xorl %ebx,%ebx + movl (%esi),%eax + xorl %ecx,%ecx + movl (%edi),%edx + # ################## Calculate word 0 + xorl %ebp,%ebp + # mul a[0]*b[0] + mull %edx + addl %eax,%ebx + movl 
20(%esp),%eax + adcl %edx,%ecx + movl (%edi),%edx + adcl $0,%ebp + movl %ebx,(%eax) + movl 4(%esi),%eax + # saved r[0] + # ################## Calculate word 1 + xorl %ebx,%ebx + # mul a[1]*b[0] + mull %edx + addl %eax,%ecx + movl (%esi),%eax + adcl %edx,%ebp + movl 4(%edi),%edx + adcl $0,%ebx + # mul a[0]*b[1] + mull %edx + addl %eax,%ecx + movl 20(%esp),%eax + adcl %edx,%ebp + movl (%edi),%edx + adcl $0,%ebx + movl %ecx,4(%eax) + movl 8(%esi),%eax + # saved r[1] + # ################## Calculate word 2 + xorl %ecx,%ecx + # mul a[2]*b[0] + mull %edx + addl %eax,%ebp + movl 4(%esi),%eax + adcl %edx,%ebx + movl 4(%edi),%edx + adcl $0,%ecx + # mul a[1]*b[1] + mull %edx + addl %eax,%ebp + movl (%esi),%eax + adcl %edx,%ebx + movl 8(%edi),%edx + adcl $0,%ecx + # mul a[0]*b[2] + mull %edx + addl %eax,%ebp + movl 20(%esp),%eax + adcl %edx,%ebx + movl (%edi),%edx + adcl $0,%ecx + movl %ebp,8(%eax) + movl 12(%esi),%eax + # saved r[2] + # ################## Calculate word 3 + xorl %ebp,%ebp + # mul a[3]*b[0] + mull %edx + addl %eax,%ebx + movl 8(%esi),%eax + adcl %edx,%ecx + movl 4(%edi),%edx + adcl $0,%ebp + # mul a[2]*b[1] + mull %edx + addl %eax,%ebx + movl 4(%esi),%eax + adcl %edx,%ecx + movl 8(%edi),%edx + adcl $0,%ebp + # mul a[1]*b[2] + mull %edx + addl %eax,%ebx + movl (%esi),%eax + adcl %edx,%ecx + movl 12(%edi),%edx + adcl $0,%ebp + # mul a[0]*b[3] + mull %edx + addl %eax,%ebx + movl 20(%esp),%eax + adcl %edx,%ecx + movl (%edi),%edx + adcl $0,%ebp + movl %ebx,12(%eax) + movl 16(%esi),%eax + # saved r[3] + # ################## Calculate word 4 + xorl %ebx,%ebx + # mul a[4]*b[0] + mull %edx + addl %eax,%ecx + movl 12(%esi),%eax + adcl %edx,%ebp + movl 4(%edi),%edx + adcl $0,%ebx + # mul a[3]*b[1] + mull %edx + addl %eax,%ecx + movl 8(%esi),%eax + adcl %edx,%ebp + movl 8(%edi),%edx + adcl $0,%ebx + # mul a[2]*b[2] + mull %edx + addl %eax,%ecx + movl 4(%esi),%eax + adcl %edx,%ebp + movl 12(%edi),%edx + adcl $0,%ebx + # mul a[1]*b[3] + mull %edx + addl %eax,%ecx + movl (%esi),%eax + adcl %edx,%ebp + movl 16(%edi),%edx + adcl $0,%ebx + # mul a[0]*b[4] + mull %edx + addl %eax,%ecx + movl 20(%esp),%eax + adcl %edx,%ebp + movl (%edi),%edx + adcl $0,%ebx + movl %ecx,16(%eax) + movl 20(%esi),%eax + # saved r[4] + # ################## Calculate word 5 + xorl %ecx,%ecx + # mul a[5]*b[0] + mull %edx + addl %eax,%ebp + movl 16(%esi),%eax + adcl %edx,%ebx + movl 4(%edi),%edx + adcl $0,%ecx + # mul a[4]*b[1] + mull %edx + addl %eax,%ebp + movl 12(%esi),%eax + adcl %edx,%ebx + movl 8(%edi),%edx + adcl $0,%ecx + # mul a[3]*b[2] + mull %edx + addl %eax,%ebp + movl 8(%esi),%eax + adcl %edx,%ebx + movl 12(%edi),%edx + adcl $0,%ecx + # mul a[2]*b[3] + mull %edx + addl %eax,%ebp + movl 4(%esi),%eax + adcl %edx,%ebx + movl 16(%edi),%edx + adcl $0,%ecx + # mul a[1]*b[4] + mull %edx + addl %eax,%ebp + movl (%esi),%eax + adcl %edx,%ebx + movl 20(%edi),%edx + adcl $0,%ecx + # mul a[0]*b[5] + mull %edx + addl %eax,%ebp + movl 20(%esp),%eax + adcl %edx,%ebx + movl (%edi),%edx + adcl $0,%ecx + movl %ebp,20(%eax) + movl 24(%esi),%eax + # saved r[5] + # ################## Calculate word 6 + xorl %ebp,%ebp + # mul a[6]*b[0] + mull %edx + addl %eax,%ebx + movl 20(%esi),%eax + adcl %edx,%ecx + movl 4(%edi),%edx + adcl $0,%ebp + # mul a[5]*b[1] + mull %edx + addl %eax,%ebx + movl 16(%esi),%eax + adcl %edx,%ecx + movl 8(%edi),%edx + adcl $0,%ebp + # mul a[4]*b[2] + mull %edx + addl %eax,%ebx + movl 12(%esi),%eax + adcl %edx,%ecx + movl 12(%edi),%edx + adcl $0,%ebp + # mul a[3]*b[3] + mull %edx + addl %eax,%ebx + movl 8(%esi),%eax 
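
The mull/adcl chains in these comba routines all follow one pattern: for each output word k, every 32x32->64-bit partial product a[i]*b[j] with i+j == k is summed into a rotating three-register column accumulator (%ebx/%ecx/%ebp above), and one finished 32-bit word is stored per column. Below is a minimal C sketch of that pattern, not the generated code itself; the uint32_t word type and the helper name mul_comba are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define N 8  /* bn_mul_comba8 works on fixed 8-word (256-bit) inputs */

static void mul_comba(uint32_t r[2 * N], const uint32_t a[N], const uint32_t b[N])
{
    uint64_t acc = 0;   /* low 64 bits of the column accumulator          */
    uint32_t top = 0;   /* third accumulator word (the adcl $0 target)    */

    for (int k = 0; k < 2 * N - 1; k++) {
        int i0 = k < N ? 0 : k - N + 1;
        int i1 = k < N ? k : N - 1;
        for (int i = i0; i <= i1; i++) {          /* all products in column k */
            uint64_t t = (uint64_t)a[i] * b[k - i]; /* mull: the edx:eax pair  */
            uint64_t old = acc;
            acc += t;                             /* addl / adcl chain         */
            top += (acc < old);                   /* carry into the third word */
        }
        r[k] = (uint32_t)acc;                     /* store one finished word   */
        acc = (acc >> 32) | ((uint64_t)top << 32);/* rotate the accumulator    */
        top = 0;
    }
    r[2 * N - 1] = (uint32_t)acc;                 /* final high word           */
}

int main(void)
{
    uint32_t a[N] = { 0xffffffffu }, b[N] = { 2 }, r[2 * N];
    mul_comba(r, a, b);
    printf("%08x %08x\n", r[1], r[0]);            /* expect: 00000001 fffffffe */
    return 0;
}

The generated code unrolls both loops fully and, instead of shifting the accumulator, rotates which of %ebx/%ecx/%ebp plays each role, which is why the register stored to the result cycles from word to word.
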
+ adcl %edx,%ecx + movl 16(%edi),%edx + adcl $0,%ebp + # mul a[2]*b[4] + mull %edx + addl %eax,%ebx + movl 4(%esi),%eax + adcl %edx,%ecx + movl 20(%edi),%edx + adcl $0,%ebp + # mul a[1]*b[5] + mull %edx + addl %eax,%ebx + movl (%esi),%eax + adcl %edx,%ecx + movl 24(%edi),%edx + adcl $0,%ebp + # mul a[0]*b[6] + mull %edx + addl %eax,%ebx + movl 20(%esp),%eax + adcl %edx,%ecx + movl (%edi),%edx + adcl $0,%ebp + movl %ebx,24(%eax) + movl 28(%esi),%eax + # saved r[6] + # ################## Calculate word 7 + xorl %ebx,%ebx + # mul a[7]*b[0] + mull %edx + addl %eax,%ecx + movl 24(%esi),%eax + adcl %edx,%ebp + movl 4(%edi),%edx + adcl $0,%ebx + # mul a[6]*b[1] + mull %edx + addl %eax,%ecx + movl 20(%esi),%eax + adcl %edx,%ebp + movl 8(%edi),%edx + adcl $0,%ebx + # mul a[5]*b[2] + mull %edx + addl %eax,%ecx + movl 16(%esi),%eax + adcl %edx,%ebp + movl 12(%edi),%edx + adcl $0,%ebx + # mul a[4]*b[3] + mull %edx + addl %eax,%ecx + movl 12(%esi),%eax + adcl %edx,%ebp + movl 16(%edi),%edx + adcl $0,%ebx + # mul a[3]*b[4] + mull %edx + addl %eax,%ecx + movl 8(%esi),%eax + adcl %edx,%ebp + movl 20(%edi),%edx + adcl $0,%ebx + # mul a[2]*b[5] + mull %edx + addl %eax,%ecx + movl 4(%esi),%eax + adcl %edx,%ebp + movl 24(%edi),%edx + adcl $0,%ebx + # mul a[1]*b[6] + mull %edx + addl %eax,%ecx + movl (%esi),%eax + adcl %edx,%ebp + movl 28(%edi),%edx + adcl $0,%ebx + # mul a[0]*b[7] + mull %edx + addl %eax,%ecx + movl 20(%esp),%eax + adcl %edx,%ebp + movl 4(%edi),%edx + adcl $0,%ebx + movl %ecx,28(%eax) + movl 28(%esi),%eax + # saved r[7] + # ################## Calculate word 8 + xorl %ecx,%ecx + # mul a[7]*b[1] + mull %edx + addl %eax,%ebp + movl 24(%esi),%eax + adcl %edx,%ebx + movl 8(%edi),%edx + adcl $0,%ecx + # mul a[6]*b[2] + mull %edx + addl %eax,%ebp + movl 20(%esi),%eax + adcl %edx,%ebx + movl 12(%edi),%edx + adcl $0,%ecx + # mul a[5]*b[3] + mull %edx + addl %eax,%ebp + movl 16(%esi),%eax + adcl %edx,%ebx + movl 16(%edi),%edx + adcl $0,%ecx + # mul a[4]*b[4] + mull %edx + addl %eax,%ebp + movl 12(%esi),%eax + adcl %edx,%ebx + movl 20(%edi),%edx + adcl $0,%ecx + # mul a[3]*b[5] + mull %edx + addl %eax,%ebp + movl 8(%esi),%eax + adcl %edx,%ebx + movl 24(%edi),%edx + adcl $0,%ecx + # mul a[2]*b[6] + mull %edx + addl %eax,%ebp + movl 4(%esi),%eax + adcl %edx,%ebx + movl 28(%edi),%edx + adcl $0,%ecx + # mul a[1]*b[7] + mull %edx + addl %eax,%ebp + movl 20(%esp),%eax + adcl %edx,%ebx + movl 8(%edi),%edx + adcl $0,%ecx + movl %ebp,32(%eax) + movl 28(%esi),%eax + # saved r[8] + # ################## Calculate word 9 + xorl %ebp,%ebp + # mul a[7]*b[2] + mull %edx + addl %eax,%ebx + movl 24(%esi),%eax + adcl %edx,%ecx + movl 12(%edi),%edx + adcl $0,%ebp + # mul a[6]*b[3] + mull %edx + addl %eax,%ebx + movl 20(%esi),%eax + adcl %edx,%ecx + movl 16(%edi),%edx + adcl $0,%ebp + # mul a[5]*b[4] + mull %edx + addl %eax,%ebx + movl 16(%esi),%eax + adcl %edx,%ecx + movl 20(%edi),%edx + adcl $0,%ebp + # mul a[4]*b[5] + mull %edx + addl %eax,%ebx + movl 12(%esi),%eax + adcl %edx,%ecx + movl 24(%edi),%edx + adcl $0,%ebp + # mul a[3]*b[6] + mull %edx + addl %eax,%ebx + movl 8(%esi),%eax + adcl %edx,%ecx + movl 28(%edi),%edx + adcl $0,%ebp + # mul a[2]*b[7] + mull %edx + addl %eax,%ebx + movl 20(%esp),%eax + adcl %edx,%ecx + movl 12(%edi),%edx + adcl $0,%ebp + movl %ebx,36(%eax) + movl 28(%esi),%eax + # saved r[9] + # ################## Calculate word 10 + xorl %ebx,%ebx + # mul a[7]*b[3] + mull %edx + addl %eax,%ecx + movl 24(%esi),%eax + adcl %edx,%ebp + movl 16(%edi),%edx + adcl $0,%ebx + # mul a[6]*b[4] + mull %edx + addl 
%eax,%ecx + movl 20(%esi),%eax + adcl %edx,%ebp + movl 20(%edi),%edx + adcl $0,%ebx + # mul a[5]*b[5] + mull %edx + addl %eax,%ecx + movl 16(%esi),%eax + adcl %edx,%ebp + movl 24(%edi),%edx + adcl $0,%ebx + # mul a[4]*b[6] + mull %edx + addl %eax,%ecx + movl 12(%esi),%eax + adcl %edx,%ebp + movl 28(%edi),%edx + adcl $0,%ebx + # mul a[3]*b[7] + mull %edx + addl %eax,%ecx + movl 20(%esp),%eax + adcl %edx,%ebp + movl 16(%edi),%edx + adcl $0,%ebx + movl %ecx,40(%eax) + movl 28(%esi),%eax + # saved r[10] + # ################## Calculate word 11 + xorl %ecx,%ecx + # mul a[7]*b[4] + mull %edx + addl %eax,%ebp + movl 24(%esi),%eax + adcl %edx,%ebx + movl 20(%edi),%edx + adcl $0,%ecx + # mul a[6]*b[5] + mull %edx + addl %eax,%ebp + movl 20(%esi),%eax + adcl %edx,%ebx + movl 24(%edi),%edx + adcl $0,%ecx + # mul a[5]*b[6] + mull %edx + addl %eax,%ebp + movl 16(%esi),%eax + adcl %edx,%ebx + movl 28(%edi),%edx + adcl $0,%ecx + # mul a[4]*b[7] + mull %edx + addl %eax,%ebp + movl 20(%esp),%eax + adcl %edx,%ebx + movl 20(%edi),%edx + adcl $0,%ecx + movl %ebp,44(%eax) + movl 28(%esi),%eax + # saved r[11] + # ################## Calculate word 12 + xorl %ebp,%ebp + # mul a[7]*b[5] + mull %edx + addl %eax,%ebx + movl 24(%esi),%eax + adcl %edx,%ecx + movl 24(%edi),%edx + adcl $0,%ebp + # mul a[6]*b[6] + mull %edx + addl %eax,%ebx + movl 20(%esi),%eax + adcl %edx,%ecx + movl 28(%edi),%edx + adcl $0,%ebp + # mul a[5]*b[7] + mull %edx + addl %eax,%ebx + movl 20(%esp),%eax + adcl %edx,%ecx + movl 24(%edi),%edx + adcl $0,%ebp + movl %ebx,48(%eax) + movl 28(%esi),%eax + # saved r[12] + # ################## Calculate word 13 + xorl %ebx,%ebx + # mul a[7]*b[6] + mull %edx + addl %eax,%ecx + movl 24(%esi),%eax + adcl %edx,%ebp + movl 28(%edi),%edx + adcl $0,%ebx + # mul a[6]*b[7] + mull %edx + addl %eax,%ecx + movl 20(%esp),%eax + adcl %edx,%ebp + movl 28(%edi),%edx + adcl $0,%ebx + movl %ecx,52(%eax) + movl 28(%esi),%eax + # saved r[13] + # ################## Calculate word 14 + xorl %ecx,%ecx + # mul a[7]*b[7] + mull %edx + addl %eax,%ebp + movl 20(%esp),%eax + adcl %edx,%ebx + adcl $0,%ecx + movl %ebp,56(%eax) + # saved r[14] + # save r[15] + movl %ebx,60(%eax) + popl %ebx + popl %ebp + popl %edi + popl %esi + ret +.globl _bn_mul_comba4 +.type _bn_mul_comba4,@function +.align 4 +_bn_mul_comba4: +L_bn_mul_comba4_begin: + pushl %esi + movl 12(%esp),%esi + pushl %edi + movl 20(%esp),%edi + pushl %ebp + pushl %ebx + xorl %ebx,%ebx + movl (%esi),%eax + xorl %ecx,%ecx + movl (%edi),%edx + # ################## Calculate word 0 + xorl %ebp,%ebp + # mul a[0]*b[0] + mull %edx + addl %eax,%ebx + movl 20(%esp),%eax + adcl %edx,%ecx + movl (%edi),%edx + adcl $0,%ebp + movl %ebx,(%eax) + movl 4(%esi),%eax + # saved r[0] + # ################## Calculate word 1 + xorl %ebx,%ebx + # mul a[1]*b[0] + mull %edx + addl %eax,%ecx + movl (%esi),%eax + adcl %edx,%ebp + movl 4(%edi),%edx + adcl $0,%ebx + # mul a[0]*b[1] + mull %edx + addl %eax,%ecx + movl 20(%esp),%eax + adcl %edx,%ebp + movl (%edi),%edx + adcl $0,%ebx + movl %ecx,4(%eax) + movl 8(%esi),%eax + # saved r[1] + # ################## Calculate word 2 + xorl %ecx,%ecx + # mul a[2]*b[0] + mull %edx + addl %eax,%ebp + movl 4(%esi),%eax + adcl %edx,%ebx + movl 4(%edi),%edx + adcl $0,%ecx + # mul a[1]*b[1] + mull %edx + addl %eax,%ebp + movl (%esi),%eax + adcl %edx,%ebx + movl 8(%edi),%edx + adcl $0,%ecx + # mul a[0]*b[2] + mull %edx + addl %eax,%ebp + movl 20(%esp),%eax + adcl %edx,%ebx + movl (%edi),%edx + adcl $0,%ecx + movl %ebp,8(%eax) + movl 12(%esi),%eax + # saved r[2] + # 
################## Calculate word 3 + xorl %ebp,%ebp + # mul a[3]*b[0] + mull %edx + addl %eax,%ebx + movl 8(%esi),%eax + adcl %edx,%ecx + movl 4(%edi),%edx + adcl $0,%ebp + # mul a[2]*b[1] + mull %edx + addl %eax,%ebx + movl 4(%esi),%eax + adcl %edx,%ecx + movl 8(%edi),%edx + adcl $0,%ebp + # mul a[1]*b[2] + mull %edx + addl %eax,%ebx + movl (%esi),%eax + adcl %edx,%ecx + movl 12(%edi),%edx + adcl $0,%ebp + # mul a[0]*b[3] + mull %edx + addl %eax,%ebx + movl 20(%esp),%eax + adcl %edx,%ecx + movl 4(%edi),%edx + adcl $0,%ebp + movl %ebx,12(%eax) + movl 12(%esi),%eax + # saved r[3] + # ################## Calculate word 4 + xorl %ebx,%ebx + # mul a[3]*b[1] + mull %edx + addl %eax,%ecx + movl 8(%esi),%eax + adcl %edx,%ebp + movl 8(%edi),%edx + adcl $0,%ebx + # mul a[2]*b[2] + mull %edx + addl %eax,%ecx + movl 4(%esi),%eax + adcl %edx,%ebp + movl 12(%edi),%edx + adcl $0,%ebx + # mul a[1]*b[3] + mull %edx + addl %eax,%ecx + movl 20(%esp),%eax + adcl %edx,%ebp + movl 8(%edi),%edx + adcl $0,%ebx + movl %ecx,16(%eax) + movl 12(%esi),%eax + # saved r[4] + # ################## Calculate word 5 + xorl %ecx,%ecx + # mul a[3]*b[2] + mull %edx + addl %eax,%ebp + movl 8(%esi),%eax + adcl %edx,%ebx + movl 12(%edi),%edx + adcl $0,%ecx + # mul a[2]*b[3] + mull %edx + addl %eax,%ebp + movl 20(%esp),%eax + adcl %edx,%ebx + movl 12(%edi),%edx + adcl $0,%ecx + movl %ebp,20(%eax) + movl 12(%esi),%eax + # saved r[5] + # ################## Calculate word 6 + xorl %ebp,%ebp + # mul a[3]*b[3] + mull %edx + addl %eax,%ebx + movl 20(%esp),%eax + adcl %edx,%ecx + adcl $0,%ebp + movl %ebx,24(%eax) + # saved r[6] + # save r[7] + movl %ecx,28(%eax) + popl %ebx + popl %ebp + popl %edi + popl %esi + ret +.globl _bn_sqr_comba8 +.type _bn_sqr_comba8,@function +.align 4 +_bn_sqr_comba8: +L_bn_sqr_comba8_begin: + pushl %esi + pushl %edi + pushl %ebp + pushl %ebx + movl 20(%esp),%edi + movl 24(%esp),%esi + xorl %ebx,%ebx + xorl %ecx,%ecx + movl (%esi),%eax + # ############### Calculate word 0 + xorl %ebp,%ebp + # sqr a[0]*a[0] + mull %eax + addl %eax,%ebx + adcl %edx,%ecx + movl (%esi),%edx + adcl $0,%ebp + movl %ebx,(%edi) + movl 4(%esi),%eax + # saved r[0] + # ############### Calculate word 1 + xorl %ebx,%ebx + # sqr a[1]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 8(%esi),%eax + adcl $0,%ebx + movl %ecx,4(%edi) + movl (%esi),%edx + # saved r[1] + # ############### Calculate word 2 + xorl %ecx,%ecx + # sqr a[2]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 4(%esi),%eax + adcl $0,%ecx + # sqr a[1]*a[1] + mull %eax + addl %eax,%ebp + adcl %edx,%ebx + movl (%esi),%edx + adcl $0,%ecx + movl %ebp,8(%edi) + movl 12(%esi),%eax + # saved r[2] + # ############### Calculate word 3 + xorl %ebp,%ebp + # sqr a[3]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 8(%esi),%eax + adcl $0,%ebp + movl 4(%esi),%edx + # sqr a[2]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 16(%esi),%eax + adcl $0,%ebp + movl %ebx,12(%edi) + movl (%esi),%edx + # saved r[3] + # ############### Calculate word 4 + xorl %ebx,%ebx + # sqr a[4]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 12(%esi),%eax + adcl $0,%ebx + movl 4(%esi),%edx + # sqr a[3]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 8(%esi),%eax + 
adcl $0,%ebx + # sqr a[2]*a[2] + mull %eax + addl %eax,%ecx + adcl %edx,%ebp + movl (%esi),%edx + adcl $0,%ebx + movl %ecx,16(%edi) + movl 20(%esi),%eax + # saved r[4] + # ############### Calculate word 5 + xorl %ecx,%ecx + # sqr a[5]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 16(%esi),%eax + adcl $0,%ecx + movl 4(%esi),%edx + # sqr a[4]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 12(%esi),%eax + adcl $0,%ecx + movl 8(%esi),%edx + # sqr a[3]*a[2] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 24(%esi),%eax + adcl $0,%ecx + movl %ebp,20(%edi) + movl (%esi),%edx + # saved r[5] + # ############### Calculate word 6 + xorl %ebp,%ebp + # sqr a[6]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 20(%esi),%eax + adcl $0,%ebp + movl 4(%esi),%edx + # sqr a[5]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 16(%esi),%eax + adcl $0,%ebp + movl 8(%esi),%edx + # sqr a[4]*a[2] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 12(%esi),%eax + adcl $0,%ebp + # sqr a[3]*a[3] + mull %eax + addl %eax,%ebx + adcl %edx,%ecx + movl (%esi),%edx + adcl $0,%ebp + movl %ebx,24(%edi) + movl 28(%esi),%eax + # saved r[6] + # ############### Calculate word 7 + xorl %ebx,%ebx + # sqr a[7]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 24(%esi),%eax + adcl $0,%ebx + movl 4(%esi),%edx + # sqr a[6]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 20(%esi),%eax + adcl $0,%ebx + movl 8(%esi),%edx + # sqr a[5]*a[2] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 16(%esi),%eax + adcl $0,%ebx + movl 12(%esi),%edx + # sqr a[4]*a[3] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 28(%esi),%eax + adcl $0,%ebx + movl %ecx,28(%edi) + movl 4(%esi),%edx + # saved r[7] + # ############### Calculate word 8 + xorl %ecx,%ecx + # sqr a[7]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 24(%esi),%eax + adcl $0,%ecx + movl 8(%esi),%edx + # sqr a[6]*a[2] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 20(%esi),%eax + adcl $0,%ecx + movl 12(%esi),%edx + # sqr a[5]*a[3] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 16(%esi),%eax + adcl $0,%ecx + # sqr a[4]*a[4] + mull %eax + addl %eax,%ebp + adcl %edx,%ebx + movl 8(%esi),%edx + adcl $0,%ecx + movl %ebp,32(%edi) + movl 28(%esi),%eax + # saved r[8] + # ############### Calculate word 9 + xorl %ebp,%ebp + # sqr a[7]*a[2] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 24(%esi),%eax + adcl $0,%ebp + movl 12(%esi),%edx + # sqr a[6]*a[3] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 20(%esi),%eax + adcl $0,%ebp + movl 16(%esi),%edx + # sqr a[5]*a[4] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 28(%esi),%eax + adcl $0,%ebp + movl %ebx,36(%edi) + movl 12(%esi),%edx + # saved r[9] + # ############### Calculate 
word 10 + xorl %ebx,%ebx + # sqr a[7]*a[3] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 24(%esi),%eax + adcl $0,%ebx + movl 16(%esi),%edx + # sqr a[6]*a[4] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 20(%esi),%eax + adcl $0,%ebx + # sqr a[5]*a[5] + mull %eax + addl %eax,%ecx + adcl %edx,%ebp + movl 16(%esi),%edx + adcl $0,%ebx + movl %ecx,40(%edi) + movl 28(%esi),%eax + # saved r[10] + # ############### Calculate word 11 + xorl %ecx,%ecx + # sqr a[7]*a[4] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 24(%esi),%eax + adcl $0,%ecx + movl 20(%esi),%edx + # sqr a[6]*a[5] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 28(%esi),%eax + adcl $0,%ecx + movl %ebp,44(%edi) + movl 20(%esi),%edx + # saved r[11] + # ############### Calculate word 12 + xorl %ebp,%ebp + # sqr a[7]*a[5] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 24(%esi),%eax + adcl $0,%ebp + # sqr a[6]*a[6] + mull %eax + addl %eax,%ebx + adcl %edx,%ecx + movl 24(%esi),%edx + adcl $0,%ebp + movl %ebx,48(%edi) + movl 28(%esi),%eax + # saved r[12] + # ############### Calculate word 13 + xorl %ebx,%ebx + # sqr a[7]*a[6] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 28(%esi),%eax + adcl $0,%ebx + movl %ecx,52(%edi) + # saved r[13] + # ############### Calculate word 14 + xorl %ecx,%ecx + # sqr a[7]*a[7] + mull %eax + addl %eax,%ebp + adcl %edx,%ebx + adcl $0,%ecx + movl %ebp,56(%edi) + # saved r[14] + movl %ebx,60(%edi) + popl %ebx + popl %ebp + popl %edi + popl %esi + ret +.globl _bn_sqr_comba4 +.type _bn_sqr_comba4,@function +.align 4 +_bn_sqr_comba4: +L_bn_sqr_comba4_begin: + pushl %esi + pushl %edi + pushl %ebp + pushl %ebx + movl 20(%esp),%edi + movl 24(%esp),%esi + xorl %ebx,%ebx + xorl %ecx,%ecx + movl (%esi),%eax + # ############### Calculate word 0 + xorl %ebp,%ebp + # sqr a[0]*a[0] + mull %eax + addl %eax,%ebx + adcl %edx,%ecx + movl (%esi),%edx + adcl $0,%ebp + movl %ebx,(%edi) + movl 4(%esi),%eax + # saved r[0] + # ############### Calculate word 1 + xorl %ebx,%ebx + # sqr a[1]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 8(%esi),%eax + adcl $0,%ebx + movl %ecx,4(%edi) + movl (%esi),%edx + # saved r[1] + # ############### Calculate word 2 + xorl %ecx,%ecx + # sqr a[2]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 4(%esi),%eax + adcl $0,%ecx + # sqr a[1]*a[1] + mull %eax + addl %eax,%ebp + adcl %edx,%ebx + movl (%esi),%edx + adcl $0,%ecx + movl %ebp,8(%edi) + movl 12(%esi),%eax + # saved r[2] + # ############### Calculate word 3 + xorl %ebp,%ebp + # sqr a[3]*a[0] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 8(%esi),%eax + adcl $0,%ebp + movl 4(%esi),%edx + # sqr a[2]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebp + addl %eax,%ebx + adcl %edx,%ecx + movl 12(%esi),%eax + adcl $0,%ebp + movl %ebx,12(%edi) + movl 4(%esi),%edx + # saved r[3] + # ############### Calculate word 4 + xorl %ebx,%ebx + # sqr a[3]*a[1] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ebx + addl %eax,%ecx + adcl %edx,%ebp + movl 8(%esi),%eax + adcl $0,%ebx + # sqr a[2]*a[2] + mull %eax + addl %eax,%ecx + adcl %edx,%ebp + movl 
8(%esi),%edx + adcl $0,%ebx + movl %ecx,16(%edi) + movl 12(%esi),%eax + # saved r[4] + # ############### Calculate word 5 + xorl %ecx,%ecx + # sqr a[3]*a[2] + mull %edx + addl %eax,%eax + adcl %edx,%edx + adcl $0,%ecx + addl %eax,%ebp + adcl %edx,%ebx + movl 12(%esi),%eax + adcl $0,%ecx + movl %ebp,20(%edi) + # saved r[5] + # ############### Calculate word 6 + xorl %ebp,%ebp + # sqr a[3]*a[3] + mull %eax + addl %eax,%ebx + adcl %edx,%ecx + adcl $0,%ebp + movl %ebx,24(%edi) + # saved r[6] + movl %ecx,28(%edi) + popl %ebx + popl %ebp + popl %edi + popl %esi + ret diff --git a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/buildinf.h index 96c786974c2af5..060858b510af1a 100644 --- a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: BSD-x86" -#define DATE "built on: Wed Mar 18 21:04:52 2020 UTC" +#define DATE "built on: Tue Mar 31 13:06:27 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/des/crypt586.s b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/des/crypt586.s new file mode 100644 index 00000000000000..d2c370231dfa7f --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/des/crypt586.s @@ -0,0 +1,877 @@ +.text +.globl _fcrypt_body +.type _fcrypt_body,@function +.align 4 +_fcrypt_body: +L_fcrypt_body_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + + # Load the 2 words + xorl %edi,%edi + xorl %esi,%esi + call L000PIC_me_up +L000PIC_me_up: + popl %edx + leal __GLOBAL_OFFSET_TABLE_+[.-L000PIC_me_up](%edx),%edx + movl _DES_SPtrans@GOT(%edx),%edx + pushl %edx + movl 28(%esp),%ebp + pushl $25 +L001start: + + # Round 0 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl (%ebp),%ebx + xorl %ebx,%eax + movl 4(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 1 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 8(%ebp),%ebx + xorl %ebx,%eax + movl 12(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 
0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + + # Round 2 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 16(%ebp),%ebx + xorl %ebx,%eax + movl 20(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 3 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 24(%ebp),%ebx + xorl %ebx,%eax + movl 28(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + + # Round 4 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 32(%ebp),%ebx + xorl %ebx,%eax + movl 36(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 5 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 40(%ebp),%ebx + xorl %ebx,%eax + movl 44(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx 
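
Each "# Round N" block in _fcrypt_body above is one unrolled DES Feistel round: the right half is XORed with two round-key words, masked with 0xfcfcfcfc and 0xcfcfcfcf, one half is rotated right by 4, and each resulting byte then indexes one of the eight 64-entry S/P tables at byte offsets 0x000-0x700 into DES_SPtrans. The C sketch below is a rough illustration of that round shape, modeled on the D_ENCRYPT macro in OpenSSL's C DES code; the zeroed SP table is only a placeholder for the real DES_SPtrans constants, and the salt-dependent expansion that fcrypt mixes in through the masks kept at 36(%esp)/40(%esp) is omitted.

#include <stdint.h>

static const uint32_t SP[8][64] = { { 0 } };  /* placeholder for DES_SPtrans */

static void des_round(uint32_t *L, uint32_t R, uint32_t k0, uint32_t k1)
{
    uint32_t u = (R ^ k0) & 0xfcfcfcfcu;      /* andl $0xfcfcfcfc,%eax */
    uint32_t t = (R ^ k1) & 0xcfcfcfcfu;      /* andl $0xcfcfcfcf,%edx */
    t = (t >> 4) | (t << 28);                 /* rorl $4,%edx          */

    /* each masked byte of u/t selects one entry from one of the
       eight 256-byte tables (the 0x000..0x700 offsets above)        */
    *L ^= SP[0][(u >>  2) & 0x3f] ^ SP[2][(u >> 10) & 0x3f] ^
          SP[4][(u >> 18) & 0x3f] ^ SP[6][(u >> 26) & 0x3f] ^
          SP[1][(t >>  2) & 0x3f] ^ SP[3][(t >> 10) & 0x3f] ^
          SP[5][(t >> 18) & 0x3f] ^ SP[7][(t >> 26) & 0x3f];
}

int main(void)
{
    uint32_t L = 0, R = 0x01234567u;
    des_round(&L, R, 0xdeadbeefu, 0xcafef00du);  /* arbitrary demo key words */
    return (int)L;
}

The low two bits of each byte are already cleared by the 0xfc/0xcf masks, which is why the byte values can be used directly as byte offsets into 4-byte-entry tables in the assembly, and as (byte >> 2) indices here.
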
+ movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + + # Round 6 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 48(%ebp),%ebx + xorl %ebx,%eax + movl 52(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 7 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 56(%ebp),%ebx + xorl %ebx,%eax + movl 60(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + + # Round 8 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 64(%ebp),%ebx + xorl %ebx,%eax + movl 68(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 9 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll 
$16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 72(%ebp),%ebx + xorl %ebx,%eax + movl 76(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + + # Round 10 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 80(%ebp),%ebx + xorl %ebx,%eax + movl 84(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 11 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 88(%ebp),%ebx + xorl %ebx,%eax + movl 92(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + + # Round 12 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 96(%ebp),%ebx + xorl %ebx,%eax + movl 100(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + 
xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 13 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 104(%ebp),%ebx + xorl %ebx,%eax + movl 108(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + + # Round 14 + movl 36(%esp),%eax + movl %esi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %esi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 112(%ebp),%ebx + xorl %ebx,%eax + movl 116(%ebp),%ecx + xorl %esi,%eax + xorl %esi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%edi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%edi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%edi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%edi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%edi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%edi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%edi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%edi + movl 32(%esp),%ebp + + # Round 15 + movl 36(%esp),%eax + movl %edi,%edx + shrl $16,%edx + movl 40(%esp),%ecx + xorl %edi,%edx + andl %edx,%eax + andl %ecx,%edx + movl %eax,%ebx + shll $16,%ebx + movl %edx,%ecx + shll $16,%ecx + xorl %ebx,%eax + xorl %ecx,%edx + movl 120(%ebp),%ebx + xorl %ebx,%eax + movl 124(%ebp),%ecx + xorl %edi,%eax + xorl %edi,%edx + xorl %ecx,%edx + andl $0xfcfcfcfc,%eax + xorl %ebx,%ebx + andl $0xcfcfcfcf,%edx + xorl %ecx,%ecx + movb %al,%bl + movb %ah,%cl + rorl $4,%edx + movl 4(%esp),%ebp + xorl (%ebp,%ebx,1),%esi + movb %dl,%bl + xorl 0x200(%ebp,%ecx,1),%esi + movb %dh,%cl + shrl $16,%eax + xorl 0x100(%ebp,%ebx,1),%esi + movb %ah,%bl + shrl $16,%edx + xorl 0x300(%ebp,%ecx,1),%esi + movb %dh,%cl + andl $0xff,%eax + andl $0xff,%edx + movl 0x600(%ebp,%ebx,1),%ebx + xorl %ebx,%esi + movl 0x700(%ebp,%ecx,1),%ebx + xorl %ebx,%esi + movl 0x400(%ebp,%eax,1),%ebx + xorl %ebx,%esi + movl 0x500(%ebp,%edx,1),%ebx + xorl %ebx,%esi + movl 32(%esp),%ebp + movl (%esp),%ebx + movl %edi,%eax + decl %ebx + movl %esi,%edi + movl %eax,%esi + movl %ebx,(%esp) + jnz L001start + + # FP + movl 28(%esp),%edx + rorl $1,%edi + movl %esi,%eax + xorl %edi,%esi + andl $0xaaaaaaaa,%esi + xorl %esi,%eax + xorl %esi,%edi + + roll $23,%eax + movl %eax,%esi + xorl %edi,%eax + andl $0x03fc03fc,%eax + xorl %eax,%esi + xorl %eax,%edi + + roll $10,%esi + movl %esi,%eax + xorl %edi,%esi + andl $0x33333333,%esi + xorl %esi,%eax + xorl %esi,%edi + + roll $18,%edi + movl %edi,%esi + xorl %eax,%edi + andl $0xfff0000f,%edi + xorl %edi,%esi + xorl %edi,%eax + + 
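
The roll/xorl/andl runs in the "# FP" epilogue above are masked bit swaps: each stage exchanges exactly the bit positions selected by a mask (0xaaaaaaaa, 0x03fc03fc, 0x33333333, 0xfff0000f, 0xf0f0f0f0) between the two halves, with rotations in between; the sequence as a whole realizes the DES final permutation. A sketch of the primitive, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* exchange, between *a and *b, exactly the bits where m is set --
   the three-xor/one-and pattern visible in each FP stage above     */
static void masked_swap(uint32_t *a, uint32_t *b, uint32_t m)
{
    uint32_t t = (*a ^ *b) & m;   /* xorl %b,%t ; andl $m,%t */
    *a ^= t;                      /* xorl %t,%a              */
    *b ^= t;                      /* xorl %t,%b              */
}

int main(void)
{
    uint32_t a = 0xffffffffu, b = 0;
    masked_swap(&a, &b, 0xaaaaaaaau);   /* exchange the odd-numbered bits */
    printf("%08x %08x\n", a, b);        /* expect: 55555555 aaaaaaaa      */
    return 0;
}

Because the swap itself never shifts, the rolls between stages do the repositioning; five masked swaps plus six rotations replace a 64-entry bit-by-bit permutation loop.
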
roll $12,%esi + movl %esi,%edi + xorl %eax,%esi + andl $0xf0f0f0f0,%esi + xorl %esi,%edi + xorl %esi,%eax + + rorl $4,%eax + movl %eax,(%edx) + movl %edi,4(%edx) + addl $8,%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret diff --git a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/md5/md5-586.s b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/md5/md5-586.s new file mode 100644 index 00000000000000..2f4efe555caebb --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/md5/md5-586.s @@ -0,0 +1,677 @@ +.text +.globl _md5_block_asm_data_order +.type _md5_block_asm_data_order,@function +.align 4 +_md5_block_asm_data_order: +L_md5_block_asm_data_order_begin: + pushl %esi + pushl %edi + movl 12(%esp),%edi + movl 16(%esp),%esi + movl 20(%esp),%ecx + pushl %ebp + shll $6,%ecx + pushl %ebx + addl %esi,%ecx + subl $64,%ecx + movl (%edi),%eax + pushl %ecx + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx +L000start: + + # R0 section + movl %ecx,%edi + movl (%esi),%ebp + # R0 0 + xorl %edx,%edi + andl %ebx,%edi + leal 3614090360(%eax,%ebp,1),%eax + xorl %edx,%edi + movl 4(%esi),%ebp + addl %edi,%eax + roll $7,%eax + movl %ebx,%edi + addl %ebx,%eax + # R0 1 + xorl %ecx,%edi + andl %eax,%edi + leal 3905402710(%edx,%ebp,1),%edx + xorl %ecx,%edi + movl 8(%esi),%ebp + addl %edi,%edx + roll $12,%edx + movl %eax,%edi + addl %eax,%edx + # R0 2 + xorl %ebx,%edi + andl %edx,%edi + leal 606105819(%ecx,%ebp,1),%ecx + xorl %ebx,%edi + movl 12(%esi),%ebp + addl %edi,%ecx + roll $17,%ecx + movl %edx,%edi + addl %edx,%ecx + # R0 3 + xorl %eax,%edi + andl %ecx,%edi + leal 3250441966(%ebx,%ebp,1),%ebx + xorl %eax,%edi + movl 16(%esi),%ebp + addl %edi,%ebx + roll $22,%ebx + movl %ecx,%edi + addl %ecx,%ebx + # R0 4 + xorl %edx,%edi + andl %ebx,%edi + leal 4118548399(%eax,%ebp,1),%eax + xorl %edx,%edi + movl 20(%esi),%ebp + addl %edi,%eax + roll $7,%eax + movl %ebx,%edi + addl %ebx,%eax + # R0 5 + xorl %ecx,%edi + andl %eax,%edi + leal 1200080426(%edx,%ebp,1),%edx + xorl %ecx,%edi + movl 24(%esi),%ebp + addl %edi,%edx + roll $12,%edx + movl %eax,%edi + addl %eax,%edx + # R0 6 + xorl %ebx,%edi + andl %edx,%edi + leal 2821735955(%ecx,%ebp,1),%ecx + xorl %ebx,%edi + movl 28(%esi),%ebp + addl %edi,%ecx + roll $17,%ecx + movl %edx,%edi + addl %edx,%ecx + # R0 7 + xorl %eax,%edi + andl %ecx,%edi + leal 4249261313(%ebx,%ebp,1),%ebx + xorl %eax,%edi + movl 32(%esi),%ebp + addl %edi,%ebx + roll $22,%ebx + movl %ecx,%edi + addl %ecx,%ebx + # R0 8 + xorl %edx,%edi + andl %ebx,%edi + leal 1770035416(%eax,%ebp,1),%eax + xorl %edx,%edi + movl 36(%esi),%ebp + addl %edi,%eax + roll $7,%eax + movl %ebx,%edi + addl %ebx,%eax + # R0 9 + xorl %ecx,%edi + andl %eax,%edi + leal 2336552879(%edx,%ebp,1),%edx + xorl %ecx,%edi + movl 40(%esi),%ebp + addl %edi,%edx + roll $12,%edx + movl %eax,%edi + addl %eax,%edx + # R0 10 + xorl %ebx,%edi + andl %edx,%edi + leal 4294925233(%ecx,%ebp,1),%ecx + xorl %ebx,%edi + movl 44(%esi),%ebp + addl %edi,%ecx + roll $17,%ecx + movl %edx,%edi + addl %edx,%ecx + # R0 11 + xorl %eax,%edi + andl %ecx,%edi + leal 2304563134(%ebx,%ebp,1),%ebx + xorl %eax,%edi + movl 48(%esi),%ebp + addl %edi,%ebx + roll $22,%ebx + movl %ecx,%edi + addl %ecx,%ebx + # R0 12 + xorl %edx,%edi + andl %ebx,%edi + leal 1804603682(%eax,%ebp,1),%eax + xorl %edx,%edi + movl 52(%esi),%ebp + addl %edi,%eax + roll $7,%eax + movl %ebx,%edi + addl %ebx,%eax + # R0 13 + xorl %ecx,%edi + andl %eax,%edi + leal 4254626195(%edx,%ebp,1),%edx + xorl %ecx,%edi + movl 56(%esi),%ebp + addl %edi,%edx + roll $12,%edx + 
movl %eax,%edi + addl %eax,%edx + # R0 14 + xorl %ebx,%edi + andl %edx,%edi + leal 2792965006(%ecx,%ebp,1),%ecx + xorl %ebx,%edi + movl 60(%esi),%ebp + addl %edi,%ecx + roll $17,%ecx + movl %edx,%edi + addl %edx,%ecx + # R0 15 + xorl %eax,%edi + andl %ecx,%edi + leal 1236535329(%ebx,%ebp,1),%ebx + xorl %eax,%edi + movl 4(%esi),%ebp + addl %edi,%ebx + roll $22,%ebx + movl %ecx,%edi + addl %ecx,%ebx + + # R1 section + # R1 16 + xorl %ebx,%edi + andl %edx,%edi + leal 4129170786(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 24(%esi),%ebp + addl %edi,%eax + movl %ebx,%edi + roll $5,%eax + addl %ebx,%eax + # R1 17 + xorl %eax,%edi + andl %ecx,%edi + leal 3225465664(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 44(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $9,%edx + addl %eax,%edx + # R1 18 + xorl %edx,%edi + andl %ebx,%edi + leal 643717713(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl (%esi),%ebp + addl %edi,%ecx + movl %edx,%edi + roll $14,%ecx + addl %edx,%ecx + # R1 19 + xorl %ecx,%edi + andl %eax,%edi + leal 3921069994(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 20(%esi),%ebp + addl %edi,%ebx + movl %ecx,%edi + roll $20,%ebx + addl %ecx,%ebx + # R1 20 + xorl %ebx,%edi + andl %edx,%edi + leal 3593408605(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 40(%esi),%ebp + addl %edi,%eax + movl %ebx,%edi + roll $5,%eax + addl %ebx,%eax + # R1 21 + xorl %eax,%edi + andl %ecx,%edi + leal 38016083(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 60(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $9,%edx + addl %eax,%edx + # R1 22 + xorl %edx,%edi + andl %ebx,%edi + leal 3634488961(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl 16(%esi),%ebp + addl %edi,%ecx + movl %edx,%edi + roll $14,%ecx + addl %edx,%ecx + # R1 23 + xorl %ecx,%edi + andl %eax,%edi + leal 3889429448(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 36(%esi),%ebp + addl %edi,%ebx + movl %ecx,%edi + roll $20,%ebx + addl %ecx,%ebx + # R1 24 + xorl %ebx,%edi + andl %edx,%edi + leal 568446438(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 56(%esi),%ebp + addl %edi,%eax + movl %ebx,%edi + roll $5,%eax + addl %ebx,%eax + # R1 25 + xorl %eax,%edi + andl %ecx,%edi + leal 3275163606(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 12(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $9,%edx + addl %eax,%edx + # R1 26 + xorl %edx,%edi + andl %ebx,%edi + leal 4107603335(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl 32(%esi),%ebp + addl %edi,%ecx + movl %edx,%edi + roll $14,%ecx + addl %edx,%ecx + # R1 27 + xorl %ecx,%edi + andl %eax,%edi + leal 1163531501(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 52(%esi),%ebp + addl %edi,%ebx + movl %ecx,%edi + roll $20,%ebx + addl %ecx,%ebx + # R1 28 + xorl %ebx,%edi + andl %edx,%edi + leal 2850285829(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 8(%esi),%ebp + addl %edi,%eax + movl %ebx,%edi + roll $5,%eax + addl %ebx,%eax + # R1 29 + xorl %eax,%edi + andl %ecx,%edi + leal 4243563512(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 28(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $9,%edx + addl %eax,%edx + # R1 30 + xorl %edx,%edi + andl %ebx,%edi + leal 1735328473(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl 48(%esi),%ebp + addl %edi,%ecx + movl %edx,%edi + roll $14,%ecx + addl %edx,%ecx + # R1 31 + xorl %ecx,%edi + andl %eax,%edi + leal 2368359562(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 20(%esi),%ebp + addl %edi,%ebx + movl %ecx,%edi + roll $20,%ebx + addl %ecx,%ebx + + # R2 section + # R2 32 + xorl %edx,%edi + xorl %ebx,%edi + leal 4294588738(%eax,%ebp,1),%eax + addl %edi,%eax + movl 32(%esi),%ebp + roll $4,%eax + movl %ebx,%edi + # R2 33 + addl 
%ebx,%eax + xorl %ecx,%edi + leal 2272392833(%edx,%ebp,1),%edx + xorl %eax,%edi + movl 44(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $11,%edx + addl %eax,%edx + # R2 34 + xorl %ebx,%edi + xorl %edx,%edi + leal 1839030562(%ecx,%ebp,1),%ecx + addl %edi,%ecx + movl 56(%esi),%ebp + roll $16,%ecx + movl %edx,%edi + # R2 35 + addl %edx,%ecx + xorl %eax,%edi + leal 4259657740(%ebx,%ebp,1),%ebx + xorl %ecx,%edi + movl 4(%esi),%ebp + addl %edi,%ebx + movl %ecx,%edi + roll $23,%ebx + addl %ecx,%ebx + # R2 36 + xorl %edx,%edi + xorl %ebx,%edi + leal 2763975236(%eax,%ebp,1),%eax + addl %edi,%eax + movl 16(%esi),%ebp + roll $4,%eax + movl %ebx,%edi + # R2 37 + addl %ebx,%eax + xorl %ecx,%edi + leal 1272893353(%edx,%ebp,1),%edx + xorl %eax,%edi + movl 28(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $11,%edx + addl %eax,%edx + # R2 38 + xorl %ebx,%edi + xorl %edx,%edi + leal 4139469664(%ecx,%ebp,1),%ecx + addl %edi,%ecx + movl 40(%esi),%ebp + roll $16,%ecx + movl %edx,%edi + # R2 39 + addl %edx,%ecx + xorl %eax,%edi + leal 3200236656(%ebx,%ebp,1),%ebx + xorl %ecx,%edi + movl 52(%esi),%ebp + addl %edi,%ebx + movl %ecx,%edi + roll $23,%ebx + addl %ecx,%ebx + # R2 40 + xorl %edx,%edi + xorl %ebx,%edi + leal 681279174(%eax,%ebp,1),%eax + addl %edi,%eax + movl (%esi),%ebp + roll $4,%eax + movl %ebx,%edi + # R2 41 + addl %ebx,%eax + xorl %ecx,%edi + leal 3936430074(%edx,%ebp,1),%edx + xorl %eax,%edi + movl 12(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $11,%edx + addl %eax,%edx + # R2 42 + xorl %ebx,%edi + xorl %edx,%edi + leal 3572445317(%ecx,%ebp,1),%ecx + addl %edi,%ecx + movl 24(%esi),%ebp + roll $16,%ecx + movl %edx,%edi + # R2 43 + addl %edx,%ecx + xorl %eax,%edi + leal 76029189(%ebx,%ebp,1),%ebx + xorl %ecx,%edi + movl 36(%esi),%ebp + addl %edi,%ebx + movl %ecx,%edi + roll $23,%ebx + addl %ecx,%ebx + # R2 44 + xorl %edx,%edi + xorl %ebx,%edi + leal 3654602809(%eax,%ebp,1),%eax + addl %edi,%eax + movl 48(%esi),%ebp + roll $4,%eax + movl %ebx,%edi + # R2 45 + addl %ebx,%eax + xorl %ecx,%edi + leal 3873151461(%edx,%ebp,1),%edx + xorl %eax,%edi + movl 60(%esi),%ebp + addl %edi,%edx + movl %eax,%edi + roll $11,%edx + addl %eax,%edx + # R2 46 + xorl %ebx,%edi + xorl %edx,%edi + leal 530742520(%ecx,%ebp,1),%ecx + addl %edi,%ecx + movl 8(%esi),%ebp + roll $16,%ecx + movl %edx,%edi + # R2 47 + addl %edx,%ecx + xorl %eax,%edi + leal 3299628645(%ebx,%ebp,1),%ebx + xorl %ecx,%edi + movl (%esi),%ebp + addl %edi,%ebx + movl $-1,%edi + roll $23,%ebx + addl %ecx,%ebx + + # R3 section + # R3 48 + xorl %edx,%edi + orl %ebx,%edi + leal 4096336452(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 28(%esi),%ebp + addl %edi,%eax + movl $-1,%edi + roll $6,%eax + xorl %ecx,%edi + addl %ebx,%eax + # R3 49 + orl %eax,%edi + leal 1126891415(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 56(%esi),%ebp + addl %edi,%edx + movl $-1,%edi + roll $10,%edx + xorl %ebx,%edi + addl %eax,%edx + # R3 50 + orl %edx,%edi + leal 2878612391(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl 20(%esi),%ebp + addl %edi,%ecx + movl $-1,%edi + roll $15,%ecx + xorl %eax,%edi + addl %edx,%ecx + # R3 51 + orl %ecx,%edi + leal 4237533241(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 48(%esi),%ebp + addl %edi,%ebx + movl $-1,%edi + roll $21,%ebx + xorl %edx,%edi + addl %ecx,%ebx + # R3 52 + orl %ebx,%edi + leal 1700485571(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 12(%esi),%ebp + addl %edi,%eax + movl $-1,%edi + roll $6,%eax + xorl %ecx,%edi + addl %ebx,%eax + # R3 53 + orl %eax,%edi + leal 2399980690(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 40(%esi),%ebp + 
addl %edi,%edx + movl $-1,%edi + roll $10,%edx + xorl %ebx,%edi + addl %eax,%edx + # R3 54 + orl %edx,%edi + leal 4293915773(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl 4(%esi),%ebp + addl %edi,%ecx + movl $-1,%edi + roll $15,%ecx + xorl %eax,%edi + addl %edx,%ecx + # R3 55 + orl %ecx,%edi + leal 2240044497(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 32(%esi),%ebp + addl %edi,%ebx + movl $-1,%edi + roll $21,%ebx + xorl %edx,%edi + addl %ecx,%ebx + # R3 56 + orl %ebx,%edi + leal 1873313359(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 60(%esi),%ebp + addl %edi,%eax + movl $-1,%edi + roll $6,%eax + xorl %ecx,%edi + addl %ebx,%eax + # R3 57 + orl %eax,%edi + leal 4264355552(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 24(%esi),%ebp + addl %edi,%edx + movl $-1,%edi + roll $10,%edx + xorl %ebx,%edi + addl %eax,%edx + # R3 58 + orl %edx,%edi + leal 2734768916(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl 52(%esi),%ebp + addl %edi,%ecx + movl $-1,%edi + roll $15,%ecx + xorl %eax,%edi + addl %edx,%ecx + # R3 59 + orl %ecx,%edi + leal 1309151649(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 16(%esi),%ebp + addl %edi,%ebx + movl $-1,%edi + roll $21,%ebx + xorl %edx,%edi + addl %ecx,%ebx + # R3 60 + orl %ebx,%edi + leal 4149444226(%eax,%ebp,1),%eax + xorl %ecx,%edi + movl 44(%esi),%ebp + addl %edi,%eax + movl $-1,%edi + roll $6,%eax + xorl %ecx,%edi + addl %ebx,%eax + # R3 61 + orl %eax,%edi + leal 3174756917(%edx,%ebp,1),%edx + xorl %ebx,%edi + movl 8(%esi),%ebp + addl %edi,%edx + movl $-1,%edi + roll $10,%edx + xorl %ebx,%edi + addl %eax,%edx + # R3 62 + orl %edx,%edi + leal 718787259(%ecx,%ebp,1),%ecx + xorl %eax,%edi + movl 36(%esi),%ebp + addl %edi,%ecx + movl $-1,%edi + roll $15,%ecx + xorl %eax,%edi + addl %edx,%ecx + # R3 63 + orl %ecx,%edi + leal 3951481745(%ebx,%ebp,1),%ebx + xorl %edx,%edi + movl 24(%esp),%ebp + addl %edi,%ebx + addl $64,%esi + roll $21,%ebx + movl (%ebp),%edi + addl %ecx,%ebx + addl %edi,%eax + movl 4(%ebp),%edi + addl %edi,%ebx + movl 8(%ebp),%edi + addl %edi,%ecx + movl 12(%ebp),%edi + addl %edi,%edx + movl %eax,(%ebp) + movl %ebx,4(%ebp) + movl (%esp),%edi + movl %ecx,8(%ebp) + movl %edx,12(%ebp) + cmpl %esi,%edi + jae L000start + popl %eax + popl %ebx + popl %ebp + popl %edi + popl %esi + ret diff --git a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/ripemd/rmd-586.s b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/ripemd/rmd-586.s new file mode 100644 index 00000000000000..17603e38536262 --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/ripemd/rmd-586.s @@ -0,0 +1,1963 @@ +.text +.globl _ripemd160_block_asm_data_order +.type _ripemd160_block_asm_data_order,@function +.align 4 +_ripemd160_block_asm_data_order: +L_ripemd160_block_asm_data_order_begin: + movl 4(%esp),%edx + movl 8(%esp),%eax + pushl %esi + movl (%edx),%ecx + pushl %edi + movl 4(%edx),%esi + pushl %ebp + movl 8(%edx),%edi + pushl %ebx + subl $108,%esp +L000start: + + movl (%eax),%ebx + movl 4(%eax),%ebp + movl %ebx,(%esp) + movl %ebp,4(%esp) + movl 8(%eax),%ebx + movl 12(%eax),%ebp + movl %ebx,8(%esp) + movl %ebp,12(%esp) + movl 16(%eax),%ebx + movl 20(%eax),%ebp + movl %ebx,16(%esp) + movl %ebp,20(%esp) + movl 24(%eax),%ebx + movl 28(%eax),%ebp + movl %ebx,24(%esp) + movl %ebp,28(%esp) + movl 32(%eax),%ebx + movl 36(%eax),%ebp + movl %ebx,32(%esp) + movl %ebp,36(%esp) + movl 40(%eax),%ebx + movl 44(%eax),%ebp + movl %ebx,40(%esp) + movl %ebp,44(%esp) + movl 48(%eax),%ebx + movl 52(%eax),%ebp + movl %ebx,48(%esp) + movl %ebp,52(%esp) + movl 56(%eax),%ebx + movl 60(%eax),%ebp + movl %ebx,56(%esp) 
+ movl %ebp,60(%esp) + movl %edi,%eax + movl 12(%edx),%ebx + movl 16(%edx),%ebp + # 0 + xorl %ebx,%eax + movl (%esp),%edx + xorl %esi,%eax + addl %edx,%ecx + roll $10,%edi + addl %eax,%ecx + movl %esi,%eax + roll $11,%ecx + addl %ebp,%ecx + # 1 + xorl %edi,%eax + movl 4(%esp),%edx + xorl %ecx,%eax + addl %eax,%ebp + movl %ecx,%eax + roll $10,%esi + addl %edx,%ebp + xorl %esi,%eax + roll $14,%ebp + addl %ebx,%ebp + # 2 + movl 8(%esp),%edx + xorl %ebp,%eax + addl %edx,%ebx + roll $10,%ecx + addl %eax,%ebx + movl %ebp,%eax + roll $15,%ebx + addl %edi,%ebx + # 3 + xorl %ecx,%eax + movl 12(%esp),%edx + xorl %ebx,%eax + addl %eax,%edi + movl %ebx,%eax + roll $10,%ebp + addl %edx,%edi + xorl %ebp,%eax + roll $12,%edi + addl %esi,%edi + # 4 + movl 16(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $5,%esi + addl %ecx,%esi + # 5 + xorl %ebx,%eax + movl 20(%esp),%edx + xorl %esi,%eax + addl %eax,%ecx + movl %esi,%eax + roll $10,%edi + addl %edx,%ecx + xorl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 6 + movl 24(%esp),%edx + xorl %ecx,%eax + addl %edx,%ebp + roll $10,%esi + addl %eax,%ebp + movl %ecx,%eax + roll $7,%ebp + addl %ebx,%ebp + # 7 + xorl %esi,%eax + movl 28(%esp),%edx + xorl %ebp,%eax + addl %eax,%ebx + movl %ebp,%eax + roll $10,%ecx + addl %edx,%ebx + xorl %ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 8 + movl 32(%esp),%edx + xorl %ebx,%eax + addl %edx,%edi + roll $10,%ebp + addl %eax,%edi + movl %ebx,%eax + roll $11,%edi + addl %esi,%edi + # 9 + xorl %ebp,%eax + movl 36(%esp),%edx + xorl %edi,%eax + addl %eax,%esi + movl %edi,%eax + roll $10,%ebx + addl %edx,%esi + xorl %ebx,%eax + roll $13,%esi + addl %ecx,%esi + # 10 + movl 40(%esp),%edx + xorl %esi,%eax + addl %edx,%ecx + roll $10,%edi + addl %eax,%ecx + movl %esi,%eax + roll $14,%ecx + addl %ebp,%ecx + # 11 + xorl %edi,%eax + movl 44(%esp),%edx + xorl %ecx,%eax + addl %eax,%ebp + movl %ecx,%eax + roll $10,%esi + addl %edx,%ebp + xorl %esi,%eax + roll $15,%ebp + addl %ebx,%ebp + # 12 + movl 48(%esp),%edx + xorl %ebp,%eax + addl %edx,%ebx + roll $10,%ecx + addl %eax,%ebx + movl %ebp,%eax + roll $6,%ebx + addl %edi,%ebx + # 13 + xorl %ecx,%eax + movl 52(%esp),%edx + xorl %ebx,%eax + addl %eax,%edi + movl %ebx,%eax + roll $10,%ebp + addl %edx,%edi + xorl %ebp,%eax + roll $7,%edi + addl %esi,%edi + # 14 + movl 56(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $9,%esi + addl %ecx,%esi + # 15 + xorl %ebx,%eax + movl 60(%esp),%edx + xorl %esi,%eax + addl %eax,%ecx + movl $-1,%eax + roll $10,%edi + addl %edx,%ecx + movl 28(%esp),%edx + roll $8,%ecx + addl %ebp,%ecx + # 16 + addl %edx,%ebp + movl %esi,%edx + subl %ecx,%eax + andl %ecx,%edx + andl %edi,%eax + orl %eax,%edx + movl 16(%esp),%eax + roll $10,%esi + leal 1518500249(%ebp,%edx,1),%ebp + movl $-1,%edx + roll $7,%ebp + addl %ebx,%ebp + # 17 + addl %eax,%ebx + movl %ecx,%eax + subl %ebp,%edx + andl %ebp,%eax + andl %esi,%edx + orl %edx,%eax + movl 52(%esp),%edx + roll $10,%ecx + leal 1518500249(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $6,%ebx + addl %edi,%ebx + # 18 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%ebp + leal 1518500249(%edi,%edx,1),%edi + movl $-1,%edx + roll $8,%edi + addl %esi,%edi + # 19 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 40(%esp),%edx + roll $10,%ebx + leal 1518500249(%esi,%eax,1),%esi + movl $-1,%eax + 
roll $13,%esi + addl %ecx,%esi + # 20 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%edi + leal 1518500249(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $11,%ecx + addl %ebp,%ecx + # 21 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl 60(%esp),%edx + roll $10,%esi + leal 1518500249(%ebp,%eax,1),%ebp + movl $-1,%eax + roll $9,%ebp + addl %ebx,%ebp + # 22 + addl %edx,%ebx + movl %ecx,%edx + subl %ebp,%eax + andl %ebp,%edx + andl %esi,%eax + orl %eax,%edx + movl 12(%esp),%eax + roll $10,%ecx + leal 1518500249(%ebx,%edx,1),%ebx + movl $-1,%edx + roll $7,%ebx + addl %edi,%ebx + # 23 + addl %eax,%edi + movl %ebp,%eax + subl %ebx,%edx + andl %ebx,%eax + andl %ecx,%edx + orl %edx,%eax + movl 48(%esp),%edx + roll $10,%ebp + leal 1518500249(%edi,%eax,1),%edi + movl $-1,%eax + roll $15,%edi + addl %esi,%edi + # 24 + addl %edx,%esi + movl %ebx,%edx + subl %edi,%eax + andl %edi,%edx + andl %ebp,%eax + orl %eax,%edx + movl (%esp),%eax + roll $10,%ebx + leal 1518500249(%esi,%edx,1),%esi + movl $-1,%edx + roll $7,%esi + addl %ecx,%esi + # 25 + addl %eax,%ecx + movl %edi,%eax + subl %esi,%edx + andl %esi,%eax + andl %ebx,%edx + orl %edx,%eax + movl 36(%esp),%edx + roll $10,%edi + leal 1518500249(%ecx,%eax,1),%ecx + movl $-1,%eax + roll $12,%ecx + addl %ebp,%ecx + # 26 + addl %edx,%ebp + movl %esi,%edx + subl %ecx,%eax + andl %ecx,%edx + andl %edi,%eax + orl %eax,%edx + movl 20(%esp),%eax + roll $10,%esi + leal 1518500249(%ebp,%edx,1),%ebp + movl $-1,%edx + roll $15,%ebp + addl %ebx,%ebp + # 27 + addl %eax,%ebx + movl %ecx,%eax + subl %ebp,%edx + andl %ebp,%eax + andl %esi,%edx + orl %edx,%eax + movl 8(%esp),%edx + roll $10,%ecx + leal 1518500249(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $9,%ebx + addl %edi,%ebx + # 28 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%ebp + leal 1518500249(%edi,%edx,1),%edi + movl $-1,%edx + roll $11,%edi + addl %esi,%edi + # 29 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 44(%esp),%edx + roll $10,%ebx + leal 1518500249(%esi,%eax,1),%esi + movl $-1,%eax + roll $7,%esi + addl %ecx,%esi + # 30 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 32(%esp),%eax + roll $10,%edi + leal 1518500249(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $13,%ecx + addl %ebp,%ecx + # 31 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl $-1,%edx + roll $10,%esi + leal 1518500249(%ebp,%eax,1),%ebp + subl %ecx,%edx + roll $12,%ebp + addl %ebx,%ebp + # 32 + movl 12(%esp),%eax + orl %ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1859775393(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $11,%ebx + addl %edi,%ebx + # 33 + movl 40(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1859775393(%edi,%eax,1),%edi + subl %ebx,%edx + roll $13,%edi + addl %esi,%edi + # 34 + movl 56(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1859775393(%esi,%edx,1),%esi + subl %edi,%eax + roll $6,%esi + addl %ecx,%esi + # 35 + movl 16(%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1859775393(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $7,%ecx + addl 
%ebp,%ecx + # 36 + movl 36(%esp),%eax + orl %ecx,%edx + addl %eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1859775393(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $14,%ebp + addl %ebx,%ebp + # 37 + movl 60(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl $-1,%edx + roll $10,%ecx + leal 1859775393(%ebx,%eax,1),%ebx + subl %ebp,%edx + roll $9,%ebx + addl %edi,%ebx + # 38 + movl 32(%esp),%eax + orl %ebx,%edx + addl %eax,%edi + xorl %ecx,%edx + movl $-1,%eax + roll $10,%ebp + leal 1859775393(%edi,%edx,1),%edi + subl %ebx,%eax + roll $13,%edi + addl %esi,%edi + # 39 + movl 4(%esp),%edx + orl %edi,%eax + addl %edx,%esi + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ebx + leal 1859775393(%esi,%eax,1),%esi + subl %edi,%edx + roll $15,%esi + addl %ecx,%esi + # 40 + movl 8(%esp),%eax + orl %esi,%edx + addl %eax,%ecx + xorl %ebx,%edx + movl $-1,%eax + roll $10,%edi + leal 1859775393(%ecx,%edx,1),%ecx + subl %esi,%eax + roll $14,%ecx + addl %ebp,%ecx + # 41 + movl 28(%esp),%edx + orl %ecx,%eax + addl %edx,%ebp + xorl %edi,%eax + movl $-1,%edx + roll $10,%esi + leal 1859775393(%ebp,%eax,1),%ebp + subl %ecx,%edx + roll $8,%ebp + addl %ebx,%ebp + # 42 + movl (%esp),%eax + orl %ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1859775393(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $13,%ebx + addl %edi,%ebx + # 43 + movl 24(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1859775393(%edi,%eax,1),%edi + subl %ebx,%edx + roll $6,%edi + addl %esi,%edi + # 44 + movl 52(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1859775393(%esi,%edx,1),%esi + subl %edi,%eax + roll $5,%esi + addl %ecx,%esi + # 45 + movl 44(%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1859775393(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $12,%ecx + addl %ebp,%ecx + # 46 + movl 20(%esp),%eax + orl %ecx,%edx + addl %eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1859775393(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $7,%ebp + addl %ebx,%ebp + # 47 + movl 48(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl $-1,%edx + roll $10,%ecx + leal 1859775393(%ebx,%eax,1),%ebx + movl %ecx,%eax + roll $5,%ebx + addl %edi,%ebx + # 48 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $11,%edi + addl %esi,%edi + # 49 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 36(%esp),%eax + roll $10,%ebx + leal 2400959708(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $12,%esi + addl %ecx,%esi + # 50 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 44(%esp),%eax + roll $10,%edi + leal 2400959708(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $14,%ecx + addl %ebp,%ecx + # 51 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 40(%esp),%eax + roll $10,%esi + leal 2400959708(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $15,%ebp + addl %ebx,%ebp + # 52 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl (%esp),%eax + roll $10,%ecx + leal 2400959708(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $14,%ebx + addl %edi,%ebx + # 53 + subl %ecx,%edx + andl %ebx,%eax + andl 
%ebp,%edx + orl %eax,%edx + movl 32(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $15,%edi + addl %esi,%edi + # 54 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 48(%esp),%eax + roll $10,%ebx + leal 2400959708(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $9,%esi + addl %ecx,%esi + # 55 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 16(%esp),%eax + roll $10,%edi + leal 2400959708(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 56 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 52(%esp),%eax + roll $10,%esi + leal 2400959708(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $9,%ebp + addl %ebx,%ebp + # 57 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 12(%esp),%eax + roll $10,%ecx + leal 2400959708(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $14,%ebx + addl %edi,%ebx + # 58 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 28(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $5,%edi + addl %esi,%edi + # 59 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 60(%esp),%eax + roll $10,%ebx + leal 2400959708(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $6,%esi + addl %ecx,%esi + # 60 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%edi + leal 2400959708(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 61 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 20(%esp),%eax + roll $10,%esi + leal 2400959708(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $6,%ebp + addl %ebx,%ebp + # 62 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%ecx + leal 2400959708(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $5,%ebx + addl %edi,%ebx + # 63 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 8(%esp),%eax + roll $10,%ebp + leal 2400959708(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + subl %ebp,%edx + roll $12,%edi + addl %esi,%edi + # 64 + movl 16(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 2840853838(%esi,%edx,1),%esi + subl %ebx,%eax + roll $9,%esi + addl %ecx,%esi + # 65 + movl (%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 2840853838(%ecx,%eax,1),%ecx + subl %edi,%edx + roll $15,%ecx + addl %ebp,%ecx + # 66 + movl 20(%esp),%eax + orl %esi,%edx + addl %eax,%ebp + xorl %ecx,%edx + movl $-1,%eax + roll $10,%esi + leal 2840853838(%ebp,%edx,1),%ebp + subl %esi,%eax + roll $5,%ebp + addl %ebx,%ebp + # 67 + movl 36(%esp),%edx + orl %ecx,%eax + addl %edx,%ebx + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ecx + leal 2840853838(%ebx,%eax,1),%ebx + subl %ecx,%edx + roll $11,%ebx + addl %edi,%ebx + # 68 + movl 28(%esp),%eax + orl %ebp,%edx + addl %eax,%edi + xorl %ebx,%edx + movl $-1,%eax + roll $10,%ebp + leal 2840853838(%edi,%edx,1),%edi + subl %ebp,%eax + roll $6,%edi + addl %esi,%edi + # 69 + movl 48(%esp),%edx + orl %ebx,%eax + addl %edx,%esi + xorl %edi,%eax + movl $-1,%edx + roll 
$10,%ebx + leal 2840853838(%esi,%eax,1),%esi + subl %ebx,%edx + roll $8,%esi + addl %ecx,%esi + # 70 + movl 8(%esp),%eax + orl %edi,%edx + addl %eax,%ecx + xorl %esi,%edx + movl $-1,%eax + roll $10,%edi + leal 2840853838(%ecx,%edx,1),%ecx + subl %edi,%eax + roll $13,%ecx + addl %ebp,%ecx + # 71 + movl 40(%esp),%edx + orl %esi,%eax + addl %edx,%ebp + xorl %ecx,%eax + movl $-1,%edx + roll $10,%esi + leal 2840853838(%ebp,%eax,1),%ebp + subl %esi,%edx + roll $12,%ebp + addl %ebx,%ebp + # 72 + movl 56(%esp),%eax + orl %ecx,%edx + addl %eax,%ebx + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ecx + leal 2840853838(%ebx,%edx,1),%ebx + subl %ecx,%eax + roll $5,%ebx + addl %edi,%ebx + # 73 + movl 4(%esp),%edx + orl %ebp,%eax + addl %edx,%edi + xorl %ebx,%eax + movl $-1,%edx + roll $10,%ebp + leal 2840853838(%edi,%eax,1),%edi + subl %ebp,%edx + roll $12,%edi + addl %esi,%edi + # 74 + movl 12(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 2840853838(%esi,%edx,1),%esi + subl %ebx,%eax + roll $13,%esi + addl %ecx,%esi + # 75 + movl 32(%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 2840853838(%ecx,%eax,1),%ecx + subl %edi,%edx + roll $14,%ecx + addl %ebp,%ecx + # 76 + movl 44(%esp),%eax + orl %esi,%edx + addl %eax,%ebp + xorl %ecx,%edx + movl $-1,%eax + roll $10,%esi + leal 2840853838(%ebp,%edx,1),%ebp + subl %esi,%eax + roll $11,%ebp + addl %ebx,%ebp + # 77 + movl 24(%esp),%edx + orl %ecx,%eax + addl %edx,%ebx + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ecx + leal 2840853838(%ebx,%eax,1),%ebx + subl %ecx,%edx + roll $8,%ebx + addl %edi,%ebx + # 78 + movl 60(%esp),%eax + orl %ebp,%edx + addl %eax,%edi + xorl %ebx,%edx + movl $-1,%eax + roll $10,%ebp + leal 2840853838(%edi,%edx,1),%edi + subl %ebp,%eax + roll $5,%edi + addl %esi,%edi + # 79 + movl 52(%esp),%edx + orl %ebx,%eax + addl %edx,%esi + xorl %edi,%eax + movl 128(%esp),%edx + roll $10,%ebx + leal 2840853838(%esi,%eax,1),%esi + movl %ecx,64(%esp) + roll $6,%esi + addl %ecx,%esi + movl (%edx),%ecx + movl %esi,68(%esp) + movl %edi,72(%esp) + movl 4(%edx),%esi + movl %ebx,76(%esp) + movl 8(%edx),%edi + movl %ebp,80(%esp) + movl 12(%edx),%ebx + movl 16(%edx),%ebp + # 80 + movl $-1,%edx + subl %ebx,%edx + movl 20(%esp),%eax + orl %edi,%edx + addl %eax,%ecx + xorl %esi,%edx + movl $-1,%eax + roll $10,%edi + leal 1352829926(%ecx,%edx,1),%ecx + subl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 81 + movl 56(%esp),%edx + orl %esi,%eax + addl %edx,%ebp + xorl %ecx,%eax + movl $-1,%edx + roll $10,%esi + leal 1352829926(%ebp,%eax,1),%ebp + subl %esi,%edx + roll $9,%ebp + addl %ebx,%ebp + # 82 + movl 28(%esp),%eax + orl %ecx,%edx + addl %eax,%ebx + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ecx + leal 1352829926(%ebx,%edx,1),%ebx + subl %ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 83 + movl (%esp),%edx + orl %ebp,%eax + addl %edx,%edi + xorl %ebx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1352829926(%edi,%eax,1),%edi + subl %ebp,%edx + roll $11,%edi + addl %esi,%edi + # 84 + movl 36(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 1352829926(%esi,%edx,1),%esi + subl %ebx,%eax + roll $13,%esi + addl %ecx,%esi + # 85 + movl 8(%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 1352829926(%ecx,%eax,1),%ecx + subl %edi,%edx + roll $15,%ecx + addl %ebp,%ecx + # 86 + movl 44(%esp),%eax + orl %esi,%edx + addl %eax,%ebp + xorl %ecx,%edx + movl $-1,%eax + roll 
$10,%esi + leal 1352829926(%ebp,%edx,1),%ebp + subl %esi,%eax + roll $15,%ebp + addl %ebx,%ebp + # 87 + movl 16(%esp),%edx + orl %ecx,%eax + addl %edx,%ebx + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ecx + leal 1352829926(%ebx,%eax,1),%ebx + subl %ecx,%edx + roll $5,%ebx + addl %edi,%ebx + # 88 + movl 52(%esp),%eax + orl %ebp,%edx + addl %eax,%edi + xorl %ebx,%edx + movl $-1,%eax + roll $10,%ebp + leal 1352829926(%edi,%edx,1),%edi + subl %ebp,%eax + roll $7,%edi + addl %esi,%edi + # 89 + movl 24(%esp),%edx + orl %ebx,%eax + addl %edx,%esi + xorl %edi,%eax + movl $-1,%edx + roll $10,%ebx + leal 1352829926(%esi,%eax,1),%esi + subl %ebx,%edx + roll $7,%esi + addl %ecx,%esi + # 90 + movl 60(%esp),%eax + orl %edi,%edx + addl %eax,%ecx + xorl %esi,%edx + movl $-1,%eax + roll $10,%edi + leal 1352829926(%ecx,%edx,1),%ecx + subl %edi,%eax + roll $8,%ecx + addl %ebp,%ecx + # 91 + movl 32(%esp),%edx + orl %esi,%eax + addl %edx,%ebp + xorl %ecx,%eax + movl $-1,%edx + roll $10,%esi + leal 1352829926(%ebp,%eax,1),%ebp + subl %esi,%edx + roll $11,%ebp + addl %ebx,%ebp + # 92 + movl 4(%esp),%eax + orl %ecx,%edx + addl %eax,%ebx + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ecx + leal 1352829926(%ebx,%edx,1),%ebx + subl %ecx,%eax + roll $14,%ebx + addl %edi,%ebx + # 93 + movl 40(%esp),%edx + orl %ebp,%eax + addl %edx,%edi + xorl %ebx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1352829926(%edi,%eax,1),%edi + subl %ebp,%edx + roll $14,%edi + addl %esi,%edi + # 94 + movl 12(%esp),%eax + orl %ebx,%edx + addl %eax,%esi + xorl %edi,%edx + movl $-1,%eax + roll $10,%ebx + leal 1352829926(%esi,%edx,1),%esi + subl %ebx,%eax + roll $12,%esi + addl %ecx,%esi + # 95 + movl 48(%esp),%edx + orl %edi,%eax + addl %edx,%ecx + xorl %esi,%eax + movl $-1,%edx + roll $10,%edi + leal 1352829926(%ecx,%eax,1),%ecx + movl %edi,%eax + roll $6,%ecx + addl %ebp,%ecx + # 96 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $9,%ebp + addl %ebx,%ebp + # 97 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 44(%esp),%eax + roll $10,%ecx + leal 1548603684(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $13,%ebx + addl %edi,%ebx + # 98 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 12(%esp),%eax + roll $10,%ebp + leal 1548603684(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $15,%edi + addl %esi,%edi + # 99 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 28(%esp),%eax + roll $10,%ebx + leal 1548603684(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $7,%esi + addl %ecx,%esi + # 100 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl (%esp),%eax + roll $10,%edi + leal 1548603684(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $12,%ecx + addl %ebp,%ecx + # 101 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 52(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $8,%ebp + addl %ebx,%ebp + # 102 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 20(%esp),%eax + roll $10,%ecx + leal 1548603684(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 103 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 
40(%esp),%eax + roll $10,%ebp + leal 1548603684(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $11,%edi + addl %esi,%edi + # 104 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%ebx + leal 1548603684(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $7,%esi + addl %ecx,%esi + # 105 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 60(%esp),%eax + roll $10,%edi + leal 1548603684(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $7,%ecx + addl %ebp,%ecx + # 106 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 32(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + movl %esi,%eax + roll $12,%ebp + addl %ebx,%ebp + # 107 + subl %esi,%edx + andl %ebp,%eax + andl %ecx,%edx + orl %eax,%edx + movl 48(%esp),%eax + roll $10,%ecx + leal 1548603684(%ebx,%edx,1),%ebx + movl $-1,%edx + addl %eax,%ebx + movl %ecx,%eax + roll $7,%ebx + addl %edi,%ebx + # 108 + subl %ecx,%edx + andl %ebx,%eax + andl %ebp,%edx + orl %eax,%edx + movl 16(%esp),%eax + roll $10,%ebp + leal 1548603684(%edi,%edx,1),%edi + movl $-1,%edx + addl %eax,%edi + movl %ebp,%eax + roll $6,%edi + addl %esi,%edi + # 109 + subl %ebp,%edx + andl %edi,%eax + andl %ebx,%edx + orl %eax,%edx + movl 36(%esp),%eax + roll $10,%ebx + leal 1548603684(%esi,%edx,1),%esi + movl $-1,%edx + addl %eax,%esi + movl %ebx,%eax + roll $15,%esi + addl %ecx,%esi + # 110 + subl %ebx,%edx + andl %esi,%eax + andl %edi,%edx + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%edi + leal 1548603684(%ecx,%edx,1),%ecx + movl $-1,%edx + addl %eax,%ecx + movl %edi,%eax + roll $13,%ecx + addl %ebp,%ecx + # 111 + subl %edi,%edx + andl %ecx,%eax + andl %esi,%edx + orl %eax,%edx + movl 8(%esp),%eax + roll $10,%esi + leal 1548603684(%ebp,%edx,1),%ebp + movl $-1,%edx + addl %eax,%ebp + subl %ecx,%edx + roll $11,%ebp + addl %ebx,%ebp + # 112 + movl 60(%esp),%eax + orl %ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1836072691(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $9,%ebx + addl %edi,%ebx + # 113 + movl 20(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1836072691(%edi,%eax,1),%edi + subl %ebx,%edx + roll $7,%edi + addl %esi,%edi + # 114 + movl 4(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1836072691(%esi,%edx,1),%esi + subl %edi,%eax + roll $15,%esi + addl %ecx,%esi + # 115 + movl 12(%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1836072691(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $11,%ecx + addl %ebp,%ecx + # 116 + movl 28(%esp),%eax + orl %ecx,%edx + addl %eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1836072691(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $8,%ebp + addl %ebx,%ebp + # 117 + movl 56(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl $-1,%edx + roll $10,%ecx + leal 1836072691(%ebx,%eax,1),%ebx + subl %ebp,%edx + roll $6,%ebx + addl %edi,%ebx + # 118 + movl 24(%esp),%eax + orl %ebx,%edx + addl %eax,%edi + xorl %ecx,%edx + movl $-1,%eax + roll $10,%ebp + leal 1836072691(%edi,%edx,1),%edi + subl %ebx,%eax + roll $6,%edi + addl %esi,%edi + # 119 + movl 36(%esp),%edx + orl %edi,%eax + addl %edx,%esi + xorl %ebp,%eax + movl $-1,%edx + roll $10,%ebx + leal 1836072691(%esi,%eax,1),%esi + subl %edi,%edx + roll $14,%esi + addl 
%ecx,%esi + # 120 + movl 44(%esp),%eax + orl %esi,%edx + addl %eax,%ecx + xorl %ebx,%edx + movl $-1,%eax + roll $10,%edi + leal 1836072691(%ecx,%edx,1),%ecx + subl %esi,%eax + roll $12,%ecx + addl %ebp,%ecx + # 121 + movl 32(%esp),%edx + orl %ecx,%eax + addl %edx,%ebp + xorl %edi,%eax + movl $-1,%edx + roll $10,%esi + leal 1836072691(%ebp,%eax,1),%ebp + subl %ecx,%edx + roll $13,%ebp + addl %ebx,%ebp + # 122 + movl 48(%esp),%eax + orl %ebp,%edx + addl %eax,%ebx + xorl %esi,%edx + movl $-1,%eax + roll $10,%ecx + leal 1836072691(%ebx,%edx,1),%ebx + subl %ebp,%eax + roll $5,%ebx + addl %edi,%ebx + # 123 + movl 8(%esp),%edx + orl %ebx,%eax + addl %edx,%edi + xorl %ecx,%eax + movl $-1,%edx + roll $10,%ebp + leal 1836072691(%edi,%eax,1),%edi + subl %ebx,%edx + roll $14,%edi + addl %esi,%edi + # 124 + movl 40(%esp),%eax + orl %edi,%edx + addl %eax,%esi + xorl %ebp,%edx + movl $-1,%eax + roll $10,%ebx + leal 1836072691(%esi,%edx,1),%esi + subl %edi,%eax + roll $13,%esi + addl %ecx,%esi + # 125 + movl (%esp),%edx + orl %esi,%eax + addl %edx,%ecx + xorl %ebx,%eax + movl $-1,%edx + roll $10,%edi + leal 1836072691(%ecx,%eax,1),%ecx + subl %esi,%edx + roll $13,%ecx + addl %ebp,%ecx + # 126 + movl 16(%esp),%eax + orl %ecx,%edx + addl %eax,%ebp + xorl %edi,%edx + movl $-1,%eax + roll $10,%esi + leal 1836072691(%ebp,%edx,1),%ebp + subl %ecx,%eax + roll $7,%ebp + addl %ebx,%ebp + # 127 + movl 52(%esp),%edx + orl %ebp,%eax + addl %edx,%ebx + xorl %esi,%eax + movl 32(%esp),%edx + roll $10,%ecx + leal 1836072691(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $5,%ebx + addl %edi,%ebx + # 128 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 24(%esp),%eax + roll $10,%ebp + leal 2053994217(%edi,%edx,1),%edi + movl $-1,%edx + roll $15,%edi + addl %esi,%edi + # 129 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 16(%esp),%edx + roll $10,%ebx + leal 2053994217(%esi,%eax,1),%esi + movl $-1,%eax + roll $5,%esi + addl %ecx,%esi + # 130 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 4(%esp),%eax + roll $10,%edi + leal 2053994217(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $8,%ecx + addl %ebp,%ecx + # 131 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl 12(%esp),%edx + roll $10,%esi + leal 2053994217(%ebp,%eax,1),%ebp + movl $-1,%eax + roll $11,%ebp + addl %ebx,%ebp + # 132 + addl %edx,%ebx + movl %ecx,%edx + subl %ebp,%eax + andl %ebp,%edx + andl %esi,%eax + orl %eax,%edx + movl 44(%esp),%eax + roll $10,%ecx + leal 2053994217(%ebx,%edx,1),%ebx + movl $-1,%edx + roll $14,%ebx + addl %edi,%ebx + # 133 + addl %eax,%edi + movl %ebp,%eax + subl %ebx,%edx + andl %ebx,%eax + andl %ecx,%edx + orl %edx,%eax + movl 60(%esp),%edx + roll $10,%ebp + leal 2053994217(%edi,%eax,1),%edi + movl $-1,%eax + roll $14,%edi + addl %esi,%edi + # 134 + addl %edx,%esi + movl %ebx,%edx + subl %edi,%eax + andl %edi,%edx + andl %ebp,%eax + orl %eax,%edx + movl (%esp),%eax + roll $10,%ebx + leal 2053994217(%esi,%edx,1),%esi + movl $-1,%edx + roll $6,%esi + addl %ecx,%esi + # 135 + addl %eax,%ecx + movl %edi,%eax + subl %esi,%edx + andl %esi,%eax + andl %ebx,%edx + orl %edx,%eax + movl 20(%esp),%edx + roll $10,%edi + leal 2053994217(%ecx,%eax,1),%ecx + movl $-1,%eax + roll $14,%ecx + addl %ebp,%ecx + # 136 + addl %edx,%ebp + movl %esi,%edx + subl %ecx,%eax + andl %ecx,%edx + andl %edi,%eax + orl %eax,%edx + movl 
48(%esp),%eax + roll $10,%esi + leal 2053994217(%ebp,%edx,1),%ebp + movl $-1,%edx + roll $6,%ebp + addl %ebx,%ebp + # 137 + addl %eax,%ebx + movl %ecx,%eax + subl %ebp,%edx + andl %ebp,%eax + andl %esi,%edx + orl %edx,%eax + movl 8(%esp),%edx + roll $10,%ecx + leal 2053994217(%ebx,%eax,1),%ebx + movl $-1,%eax + roll $9,%ebx + addl %edi,%ebx + # 138 + addl %edx,%edi + movl %ebp,%edx + subl %ebx,%eax + andl %ebx,%edx + andl %ecx,%eax + orl %eax,%edx + movl 52(%esp),%eax + roll $10,%ebp + leal 2053994217(%edi,%edx,1),%edi + movl $-1,%edx + roll $12,%edi + addl %esi,%edi + # 139 + addl %eax,%esi + movl %ebx,%eax + subl %edi,%edx + andl %edi,%eax + andl %ebp,%edx + orl %edx,%eax + movl 36(%esp),%edx + roll $10,%ebx + leal 2053994217(%esi,%eax,1),%esi + movl $-1,%eax + roll $9,%esi + addl %ecx,%esi + # 140 + addl %edx,%ecx + movl %edi,%edx + subl %esi,%eax + andl %esi,%edx + andl %ebx,%eax + orl %eax,%edx + movl 28(%esp),%eax + roll $10,%edi + leal 2053994217(%ecx,%edx,1),%ecx + movl $-1,%edx + roll $12,%ecx + addl %ebp,%ecx + # 141 + addl %eax,%ebp + movl %esi,%eax + subl %ecx,%edx + andl %ecx,%eax + andl %edi,%edx + orl %edx,%eax + movl 40(%esp),%edx + roll $10,%esi + leal 2053994217(%ebp,%eax,1),%ebp + movl $-1,%eax + roll $5,%ebp + addl %ebx,%ebp + # 142 + addl %edx,%ebx + movl %ecx,%edx + subl %ebp,%eax + andl %ebp,%edx + andl %esi,%eax + orl %eax,%edx + movl 56(%esp),%eax + roll $10,%ecx + leal 2053994217(%ebx,%edx,1),%ebx + movl $-1,%edx + roll $15,%ebx + addl %edi,%ebx + # 143 + addl %eax,%edi + movl %ebp,%eax + subl %ebx,%edx + andl %ebx,%eax + andl %ecx,%edx + orl %eax,%edx + movl %ebx,%eax + roll $10,%ebp + leal 2053994217(%edi,%edx,1),%edi + xorl %ebp,%eax + roll $8,%edi + addl %esi,%edi + # 144 + movl 48(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $8,%esi + addl %ecx,%esi + # 145 + xorl %ebx,%eax + movl 60(%esp),%edx + xorl %esi,%eax + addl %eax,%ecx + movl %esi,%eax + roll $10,%edi + addl %edx,%ecx + xorl %edi,%eax + roll $5,%ecx + addl %ebp,%ecx + # 146 + movl 40(%esp),%edx + xorl %ecx,%eax + addl %edx,%ebp + roll $10,%esi + addl %eax,%ebp + movl %ecx,%eax + roll $12,%ebp + addl %ebx,%ebp + # 147 + xorl %esi,%eax + movl 16(%esp),%edx + xorl %ebp,%eax + addl %eax,%ebx + movl %ebp,%eax + roll $10,%ecx + addl %edx,%ebx + xorl %ecx,%eax + roll $9,%ebx + addl %edi,%ebx + # 148 + movl 4(%esp),%edx + xorl %ebx,%eax + addl %edx,%edi + roll $10,%ebp + addl %eax,%edi + movl %ebx,%eax + roll $12,%edi + addl %esi,%edi + # 149 + xorl %ebp,%eax + movl 20(%esp),%edx + xorl %edi,%eax + addl %eax,%esi + movl %edi,%eax + roll $10,%ebx + addl %edx,%esi + xorl %ebx,%eax + roll $5,%esi + addl %ecx,%esi + # 150 + movl 32(%esp),%edx + xorl %esi,%eax + addl %edx,%ecx + roll $10,%edi + addl %eax,%ecx + movl %esi,%eax + roll $14,%ecx + addl %ebp,%ecx + # 151 + xorl %edi,%eax + movl 28(%esp),%edx + xorl %ecx,%eax + addl %eax,%ebp + movl %ecx,%eax + roll $10,%esi + addl %edx,%ebp + xorl %esi,%eax + roll $6,%ebp + addl %ebx,%ebp + # 152 + movl 24(%esp),%edx + xorl %ebp,%eax + addl %edx,%ebx + roll $10,%ecx + addl %eax,%ebx + movl %ebp,%eax + roll $8,%ebx + addl %edi,%ebx + # 153 + xorl %ecx,%eax + movl 8(%esp),%edx + xorl %ebx,%eax + addl %eax,%edi + movl %ebx,%eax + roll $10,%ebp + addl %edx,%edi + xorl %ebp,%eax + roll $13,%edi + addl %esi,%edi + # 154 + movl 52(%esp),%edx + xorl %edi,%eax + addl %edx,%esi + roll $10,%ebx + addl %eax,%esi + movl %edi,%eax + roll $6,%esi + addl %ecx,%esi + # 155 + xorl %ebx,%eax + movl 56(%esp),%edx + xorl %esi,%eax + 
addl %eax,%ecx + movl %esi,%eax + roll $10,%edi + addl %edx,%ecx + xorl %edi,%eax + roll $5,%ecx + addl %ebp,%ecx + # 156 + movl (%esp),%edx + xorl %ecx,%eax + addl %edx,%ebp + roll $10,%esi + addl %eax,%ebp + movl %ecx,%eax + roll $15,%ebp + addl %ebx,%ebp + # 157 + xorl %esi,%eax + movl 12(%esp),%edx + xorl %ebp,%eax + addl %eax,%ebx + movl %ebp,%eax + roll $10,%ecx + addl %edx,%ebx + xorl %ecx,%eax + roll $13,%ebx + addl %edi,%ebx + # 158 + movl 36(%esp),%edx + xorl %ebx,%eax + addl %edx,%edi + roll $10,%ebp + addl %eax,%edi + movl %ebx,%eax + roll $11,%edi + addl %esi,%edi + # 159 + xorl %ebp,%eax + movl 44(%esp),%edx + xorl %edi,%eax + addl %eax,%esi + roll $10,%ebx + addl %edx,%esi + movl 128(%esp),%edx + roll $11,%esi + addl %ecx,%esi + movl 4(%edx),%eax + addl %eax,%ebx + movl 72(%esp),%eax + addl %eax,%ebx + movl 8(%edx),%eax + addl %eax,%ebp + movl 76(%esp),%eax + addl %eax,%ebp + movl 12(%edx),%eax + addl %eax,%ecx + movl 80(%esp),%eax + addl %eax,%ecx + movl 16(%edx),%eax + addl %eax,%esi + movl 64(%esp),%eax + addl %eax,%esi + movl (%edx),%eax + addl %eax,%edi + movl 68(%esp),%eax + addl %eax,%edi + movl 136(%esp),%eax + movl %ebx,(%edx) + movl %ebp,4(%edx) + movl %ecx,8(%edx) + subl $1,%eax + movl %esi,12(%edx) + movl %edi,16(%edx) + jle L001get_out + movl %eax,136(%esp) + movl %ecx,%edi + movl 132(%esp),%eax + movl %ebx,%ecx + addl $64,%eax + movl %ebp,%esi + movl %eax,132(%esp) + jmp L000start +L001get_out: + addl $108,%esp + popl %ebx + popl %ebp + popl %edi + popl %esi + ret diff --git a/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/sha/sha1-586.s b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/sha/sha1-586.s new file mode 100644 index 00000000000000..42b2788199f6dc --- /dev/null +++ b/deps/openssl/config/archs/BSD-x86/asm_avx2/crypto/sha/sha1-586.s @@ -0,0 +1,3966 @@ +.text +.globl _sha1_block_data_order +.type _sha1_block_data_order,@function +.align 4 +_sha1_block_data_order: +L_sha1_block_data_order_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L000pic_point +L000pic_point: + popl %ebp + leal __GLOBAL_OFFSET_TABLE_+[.-L000pic_point](%ebp),%esi + movl _OPENSSL_ia32cap_P@GOT(%esi),%esi + leal LK_XX_XX-L000pic_point(%ebp),%ebp + movl (%esi),%eax + movl 4(%esi),%edx + testl $512,%edx + jz L001x86 + movl 8(%esi),%ecx + testl $16777216,%eax + jz L001x86 + testl $536870912,%ecx + jnz Lshaext_shortcut + andl $268435456,%edx + andl $1073741824,%eax + orl %edx,%eax + cmpl $1342177280,%eax + je Lavx_shortcut + jmp Lssse3_shortcut +.align 4,0x90 +L001x86: + movl 20(%esp),%ebp + movl 24(%esp),%esi + movl 28(%esp),%eax + subl $76,%esp + shll $6,%eax + addl %esi,%eax + movl %eax,104(%esp) + movl 16(%ebp),%edi + jmp L002loop +.align 4,0x90 +L002loop: + movl (%esi),%eax + movl 4(%esi),%ebx + movl 8(%esi),%ecx + movl 12(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,(%esp) + movl %ebx,4(%esp) + movl %ecx,8(%esp) + movl %edx,12(%esp) + movl 16(%esi),%eax + movl 20(%esi),%ebx + movl 24(%esi),%ecx + movl 28(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,16(%esp) + movl %ebx,20(%esp) + movl %ecx,24(%esp) + movl %edx,28(%esp) + movl 32(%esi),%eax + movl 36(%esi),%ebx + movl 40(%esi),%ecx + movl 44(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,32(%esp) + movl %ebx,36(%esp) + movl %ecx,40(%esp) + movl %edx,44(%esp) + movl 48(%esi),%eax + movl 52(%esi),%ebx + movl 56(%esi),%ecx + movl 60(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,48(%esp) + movl 
%ebx,52(%esp) + movl %ecx,56(%esp) + movl %edx,60(%esp) + movl %esi,100(%esp) + movl (%ebp),%eax + movl 4(%ebp),%ebx + movl 8(%ebp),%ecx + movl 12(%ebp),%edx + # 00_15 0 + movl %ecx,%esi + movl %eax,%ebp + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl (%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + # 00_15 1 + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 4(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + # 00_15 2 + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 8(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + # 00_15 3 + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 12(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + addl %ecx,%ebp + # 00_15 4 + movl %edi,%ebx + movl %ebp,%ecx + roll $5,%ebp + xorl %esi,%ebx + addl %eax,%ebp + movl 16(%esp),%eax + andl %edx,%ebx + rorl $2,%edx + xorl %esi,%ebx + leal 1518500249(%ebp,%eax,1),%ebp + addl %ebx,%ebp + # 00_15 5 + movl %edx,%eax + movl %ebp,%ebx + roll $5,%ebp + xorl %edi,%eax + addl %esi,%ebp + movl 20(%esp),%esi + andl %ecx,%eax + rorl $2,%ecx + xorl %edi,%eax + leal 1518500249(%ebp,%esi,1),%ebp + addl %eax,%ebp + # 00_15 6 + movl %ecx,%esi + movl %ebp,%eax + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl 24(%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + # 00_15 7 + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 28(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + # 00_15 8 + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 32(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + # 00_15 9 + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 36(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + addl %ecx,%ebp + # 00_15 10 + movl %edi,%ebx + movl %ebp,%ecx + roll $5,%ebp + xorl %esi,%ebx + addl %eax,%ebp + movl 40(%esp),%eax + andl %edx,%ebx + rorl $2,%edx + xorl %esi,%ebx + leal 1518500249(%ebp,%eax,1),%ebp + addl %ebx,%ebp + # 00_15 11 + movl %edx,%eax + movl %ebp,%ebx + roll $5,%ebp + xorl %edi,%eax + addl %esi,%ebp + movl 44(%esp),%esi + andl %ecx,%eax + rorl $2,%ecx + xorl %edi,%eax + leal 1518500249(%ebp,%esi,1),%ebp + addl %eax,%ebp + # 00_15 12 + movl %ecx,%esi + movl %ebp,%eax + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl 48(%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + # 00_15 13 + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 52(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + # 00_15 14 + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 56(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + # 00_15 15 + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 60(%esp),%ebx + andl 
%edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + movl (%esp),%ebx + addl %ebp,%ecx + # 16_19 16 + movl %edi,%ebp + xorl 8(%esp),%ebx + xorl %esi,%ebp + xorl 32(%esp),%ebx + andl %edx,%ebp + xorl 52(%esp),%ebx + roll $1,%ebx + xorl %esi,%ebp + addl %ebp,%eax + movl %ecx,%ebp + rorl $2,%edx + movl %ebx,(%esp) + roll $5,%ebp + leal 1518500249(%ebx,%eax,1),%ebx + movl 4(%esp),%eax + addl %ebp,%ebx + # 16_19 17 + movl %edx,%ebp + xorl 12(%esp),%eax + xorl %edi,%ebp + xorl 36(%esp),%eax + andl %ecx,%ebp + xorl 56(%esp),%eax + roll $1,%eax + xorl %edi,%ebp + addl %ebp,%esi + movl %ebx,%ebp + rorl $2,%ecx + movl %eax,4(%esp) + roll $5,%ebp + leal 1518500249(%eax,%esi,1),%eax + movl 8(%esp),%esi + addl %ebp,%eax + # 16_19 18 + movl %ecx,%ebp + xorl 16(%esp),%esi + xorl %edx,%ebp + xorl 40(%esp),%esi + andl %ebx,%ebp + xorl 60(%esp),%esi + roll $1,%esi + xorl %edx,%ebp + addl %ebp,%edi + movl %eax,%ebp + rorl $2,%ebx + movl %esi,8(%esp) + roll $5,%ebp + leal 1518500249(%esi,%edi,1),%esi + movl 12(%esp),%edi + addl %ebp,%esi + # 16_19 19 + movl %ebx,%ebp + xorl 20(%esp),%edi + xorl %ecx,%ebp + xorl 44(%esp),%edi + andl %eax,%ebp + xorl (%esp),%edi + roll $1,%edi + xorl %ecx,%ebp + addl %ebp,%edx + movl %esi,%ebp + rorl $2,%eax + movl %edi,12(%esp) + roll $5,%ebp + leal 1518500249(%edi,%edx,1),%edi + movl 16(%esp),%edx + addl %ebp,%edi + # 20_39 20 + movl %esi,%ebp + xorl 24(%esp),%edx + xorl %eax,%ebp + xorl 48(%esp),%edx + xorl %ebx,%ebp + xorl 4(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,16(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 20(%esp),%ecx + addl %ebp,%edx + # 20_39 21 + movl %edi,%ebp + xorl 28(%esp),%ecx + xorl %esi,%ebp + xorl 52(%esp),%ecx + xorl %eax,%ebp + xorl 8(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,20(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 24(%esp),%ebx + addl %ebp,%ecx + # 20_39 22 + movl %edx,%ebp + xorl 32(%esp),%ebx + xorl %edi,%ebp + xorl 56(%esp),%ebx + xorl %esi,%ebp + xorl 12(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,24(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 28(%esp),%eax + addl %ebp,%ebx + # 20_39 23 + movl %ecx,%ebp + xorl 36(%esp),%eax + xorl %edx,%ebp + xorl 60(%esp),%eax + xorl %edi,%ebp + xorl 16(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,28(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 32(%esp),%esi + addl %ebp,%eax + # 20_39 24 + movl %ebx,%ebp + xorl 40(%esp),%esi + xorl %ecx,%ebp + xorl (%esp),%esi + xorl %edx,%ebp + xorl 20(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,32(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 36(%esp),%edi + addl %ebp,%esi + # 20_39 25 + movl %eax,%ebp + xorl 44(%esp),%edi + xorl %ebx,%ebp + xorl 4(%esp),%edi + xorl %ecx,%ebp + xorl 24(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,36(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl 40(%esp),%edx + addl %ebp,%edi + # 20_39 26 + movl %esi,%ebp + xorl 48(%esp),%edx + xorl %eax,%ebp + xorl 8(%esp),%edx + xorl %ebx,%ebp + xorl 28(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,40(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 44(%esp),%ecx + addl %ebp,%edx + # 20_39 27 + movl %edi,%ebp + xorl 52(%esp),%ecx + xorl %esi,%ebp + xorl 
12(%esp),%ecx + xorl %eax,%ebp + xorl 32(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,44(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 48(%esp),%ebx + addl %ebp,%ecx + # 20_39 28 + movl %edx,%ebp + xorl 56(%esp),%ebx + xorl %edi,%ebp + xorl 16(%esp),%ebx + xorl %esi,%ebp + xorl 36(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,48(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 52(%esp),%eax + addl %ebp,%ebx + # 20_39 29 + movl %ecx,%ebp + xorl 60(%esp),%eax + xorl %edx,%ebp + xorl 20(%esp),%eax + xorl %edi,%ebp + xorl 40(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,52(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 56(%esp),%esi + addl %ebp,%eax + # 20_39 30 + movl %ebx,%ebp + xorl (%esp),%esi + xorl %ecx,%ebp + xorl 24(%esp),%esi + xorl %edx,%ebp + xorl 44(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,56(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 60(%esp),%edi + addl %ebp,%esi + # 20_39 31 + movl %eax,%ebp + xorl 4(%esp),%edi + xorl %ebx,%ebp + xorl 28(%esp),%edi + xorl %ecx,%ebp + xorl 48(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,60(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl (%esp),%edx + addl %ebp,%edi + # 20_39 32 + movl %esi,%ebp + xorl 8(%esp),%edx + xorl %eax,%ebp + xorl 32(%esp),%edx + xorl %ebx,%ebp + xorl 52(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 4(%esp),%ecx + addl %ebp,%edx + # 20_39 33 + movl %edi,%ebp + xorl 12(%esp),%ecx + xorl %esi,%ebp + xorl 36(%esp),%ecx + xorl %eax,%ebp + xorl 56(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,4(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 8(%esp),%ebx + addl %ebp,%ecx + # 20_39 34 + movl %edx,%ebp + xorl 16(%esp),%ebx + xorl %edi,%ebp + xorl 40(%esp),%ebx + xorl %esi,%ebp + xorl 60(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,8(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 12(%esp),%eax + addl %ebp,%ebx + # 20_39 35 + movl %ecx,%ebp + xorl 20(%esp),%eax + xorl %edx,%ebp + xorl 44(%esp),%eax + xorl %edi,%ebp + xorl (%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,12(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 16(%esp),%esi + addl %ebp,%eax + # 20_39 36 + movl %ebx,%ebp + xorl 24(%esp),%esi + xorl %ecx,%ebp + xorl 48(%esp),%esi + xorl %edx,%ebp + xorl 4(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,16(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 20(%esp),%edi + addl %ebp,%esi + # 20_39 37 + movl %eax,%ebp + xorl 28(%esp),%edi + xorl %ebx,%ebp + xorl 52(%esp),%edi + xorl %ecx,%ebp + xorl 8(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,20(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl 24(%esp),%edx + addl %ebp,%edi + # 20_39 38 + movl %esi,%ebp + xorl 32(%esp),%edx + xorl %eax,%ebp + xorl 56(%esp),%edx + xorl %ebx,%ebp + xorl 12(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,24(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 28(%esp),%ecx + addl %ebp,%edx + # 20_39 39 + movl %edi,%ebp + 
xorl 36(%esp),%ecx + xorl %esi,%ebp + xorl 60(%esp),%ecx + xorl %eax,%ebp + xorl 16(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,28(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 32(%esp),%ebx + addl %ebp,%ecx + # 40_59 40 + movl %edi,%ebp + xorl 40(%esp),%ebx + xorl %esi,%ebp + xorl (%esp),%ebx + andl %edx,%ebp + xorl 20(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,32(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 36(%esp),%eax + addl %ebp,%ebx + # 40_59 41 + movl %edx,%ebp + xorl 44(%esp),%eax + xorl %edi,%ebp + xorl 4(%esp),%eax + andl %ecx,%ebp + xorl 24(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,36(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 40(%esp),%esi + addl %ebp,%eax + # 40_59 42 + movl %ecx,%ebp + xorl 48(%esp),%esi + xorl %edx,%ebp + xorl 8(%esp),%esi + andl %ebx,%ebp + xorl 28(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,40(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 44(%esp),%edi + addl %ebp,%esi + # 40_59 43 + movl %ebx,%ebp + xorl 52(%esp),%edi + xorl %ecx,%ebp + xorl 12(%esp),%edi + andl %eax,%ebp + xorl 32(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,44(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 48(%esp),%edx + addl %ebp,%edi + # 40_59 44 + movl %eax,%ebp + xorl 56(%esp),%edx + xorl %ebx,%ebp + xorl 16(%esp),%edx + andl %esi,%ebp + xorl 36(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,48(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 52(%esp),%ecx + addl %ebp,%edx + # 40_59 45 + movl %esi,%ebp + xorl 60(%esp),%ecx + xorl %eax,%ebp + xorl 20(%esp),%ecx + andl %edi,%ebp + xorl 40(%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,52(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 56(%esp),%ebx + addl %ebp,%ecx + # 40_59 46 + movl %edi,%ebp + xorl (%esp),%ebx + xorl %esi,%ebp + xorl 24(%esp),%ebx + andl %edx,%ebp + xorl 44(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,56(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 60(%esp),%eax + addl %ebp,%ebx + # 40_59 47 + movl %edx,%ebp + xorl 4(%esp),%eax + xorl %edi,%ebp + xorl 28(%esp),%eax + andl %ecx,%ebp + xorl 48(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,60(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl (%esp),%esi + addl %ebp,%eax + # 40_59 48 + movl %ecx,%ebp + xorl 8(%esp),%esi + xorl %edx,%ebp + xorl 32(%esp),%esi + andl %ebx,%ebp + xorl 52(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 4(%esp),%edi + addl %ebp,%esi + # 40_59 49 + movl %ebx,%ebp + xorl 12(%esp),%edi + xorl %ecx,%ebp + xorl 36(%esp),%edi + andl %eax,%ebp + xorl 56(%esp),%edi + 
roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,4(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 8(%esp),%edx + addl %ebp,%edi + # 40_59 50 + movl %eax,%ebp + xorl 16(%esp),%edx + xorl %ebx,%ebp + xorl 40(%esp),%edx + andl %esi,%ebp + xorl 60(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,8(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 12(%esp),%ecx + addl %ebp,%edx + # 40_59 51 + movl %esi,%ebp + xorl 20(%esp),%ecx + xorl %eax,%ebp + xorl 44(%esp),%ecx + andl %edi,%ebp + xorl (%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,12(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 16(%esp),%ebx + addl %ebp,%ecx + # 40_59 52 + movl %edi,%ebp + xorl 24(%esp),%ebx + xorl %esi,%ebp + xorl 48(%esp),%ebx + andl %edx,%ebp + xorl 4(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,16(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 20(%esp),%eax + addl %ebp,%ebx + # 40_59 53 + movl %edx,%ebp + xorl 28(%esp),%eax + xorl %edi,%ebp + xorl 52(%esp),%eax + andl %ecx,%ebp + xorl 8(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,20(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 24(%esp),%esi + addl %ebp,%eax + # 40_59 54 + movl %ecx,%ebp + xorl 32(%esp),%esi + xorl %edx,%ebp + xorl 56(%esp),%esi + andl %ebx,%ebp + xorl 12(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,24(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 28(%esp),%edi + addl %ebp,%esi + # 40_59 55 + movl %ebx,%ebp + xorl 36(%esp),%edi + xorl %ecx,%ebp + xorl 60(%esp),%edi + andl %eax,%ebp + xorl 16(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,28(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 32(%esp),%edx + addl %ebp,%edi + # 40_59 56 + movl %eax,%ebp + xorl 40(%esp),%edx + xorl %ebx,%ebp + xorl (%esp),%edx + andl %esi,%ebp + xorl 20(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,32(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 36(%esp),%ecx + addl %ebp,%edx + # 40_59 57 + movl %esi,%ebp + xorl 44(%esp),%ecx + xorl %eax,%ebp + xorl 4(%esp),%ecx + andl %edi,%ebp + xorl 24(%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,36(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 40(%esp),%ebx + addl %ebp,%ecx + # 40_59 58 + movl %edi,%ebp + xorl 48(%esp),%ebx + xorl %esi,%ebp + xorl 8(%esp),%ebx + andl %edx,%ebp + xorl 28(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,40(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 44(%esp),%eax + addl %ebp,%ebx + # 40_59 59 + movl %edx,%ebp + xorl 52(%esp),%eax + xorl %edi,%ebp + xorl 12(%esp),%eax + andl %ecx,%ebp + xorl 32(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx 
+ movl %ebx,%esi + roll $5,%esi + movl %eax,44(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 48(%esp),%esi + addl %ebp,%eax + # 20_39 60 + movl %ebx,%ebp + xorl 56(%esp),%esi + xorl %ecx,%ebp + xorl 16(%esp),%esi + xorl %edx,%ebp + xorl 36(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,48(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 52(%esp),%edi + addl %ebp,%esi + # 20_39 61 + movl %eax,%ebp + xorl 60(%esp),%edi + xorl %ebx,%ebp + xorl 20(%esp),%edi + xorl %ecx,%ebp + xorl 40(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,52(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 56(%esp),%edx + addl %ebp,%edi + # 20_39 62 + movl %esi,%ebp + xorl (%esp),%edx + xorl %eax,%ebp + xorl 24(%esp),%edx + xorl %ebx,%ebp + xorl 44(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,56(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 60(%esp),%ecx + addl %ebp,%edx + # 20_39 63 + movl %edi,%ebp + xorl 4(%esp),%ecx + xorl %esi,%ebp + xorl 28(%esp),%ecx + xorl %eax,%ebp + xorl 48(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,60(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl (%esp),%ebx + addl %ebp,%ecx + # 20_39 64 + movl %edx,%ebp + xorl 8(%esp),%ebx + xorl %edi,%ebp + xorl 32(%esp),%ebx + xorl %esi,%ebp + xorl 52(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 4(%esp),%eax + addl %ebp,%ebx + # 20_39 65 + movl %ecx,%ebp + xorl 12(%esp),%eax + xorl %edx,%ebp + xorl 36(%esp),%eax + xorl %edi,%ebp + xorl 56(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,4(%esp) + leal 3395469782(%eax,%esi,1),%eax + movl 8(%esp),%esi + addl %ebp,%eax + # 20_39 66 + movl %ebx,%ebp + xorl 16(%esp),%esi + xorl %ecx,%ebp + xorl 40(%esp),%esi + xorl %edx,%ebp + xorl 60(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,8(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 12(%esp),%edi + addl %ebp,%esi + # 20_39 67 + movl %eax,%ebp + xorl 20(%esp),%edi + xorl %ebx,%ebp + xorl 44(%esp),%edi + xorl %ecx,%ebp + xorl (%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,12(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 16(%esp),%edx + addl %ebp,%edi + # 20_39 68 + movl %esi,%ebp + xorl 24(%esp),%edx + xorl %eax,%ebp + xorl 48(%esp),%edx + xorl %ebx,%ebp + xorl 4(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,16(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 20(%esp),%ecx + addl %ebp,%edx + # 20_39 69 + movl %edi,%ebp + xorl 28(%esp),%ecx + xorl %esi,%ebp + xorl 52(%esp),%ecx + xorl %eax,%ebp + xorl 8(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,20(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl 24(%esp),%ebx + addl %ebp,%ecx + # 20_39 70 + movl %edx,%ebp + xorl 32(%esp),%ebx + xorl %edi,%ebp + xorl 56(%esp),%ebx + xorl %esi,%ebp + xorl 12(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,24(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 28(%esp),%eax + addl %ebp,%ebx + # 20_39 71 + movl %ecx,%ebp + xorl 36(%esp),%eax + xorl %edx,%ebp + xorl 
60(%esp),%eax + xorl %edi,%ebp + xorl 16(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,28(%esp) + leal 3395469782(%eax,%esi,1),%eax + movl 32(%esp),%esi + addl %ebp,%eax + # 20_39 72 + movl %ebx,%ebp + xorl 40(%esp),%esi + xorl %ecx,%ebp + xorl (%esp),%esi + xorl %edx,%ebp + xorl 20(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,32(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 36(%esp),%edi + addl %ebp,%esi + # 20_39 73 + movl %eax,%ebp + xorl 44(%esp),%edi + xorl %ebx,%ebp + xorl 4(%esp),%edi + xorl %ecx,%ebp + xorl 24(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,36(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 40(%esp),%edx + addl %ebp,%edi + # 20_39 74 + movl %esi,%ebp + xorl 48(%esp),%edx + xorl %eax,%ebp + xorl 8(%esp),%edx + xorl %ebx,%ebp + xorl 28(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,40(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 44(%esp),%ecx + addl %ebp,%edx + # 20_39 75 + movl %edi,%ebp + xorl 52(%esp),%ecx + xorl %esi,%ebp + xorl 12(%esp),%ecx + xorl %eax,%ebp + xorl 32(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,44(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl 48(%esp),%ebx + addl %ebp,%ecx + # 20_39 76 + movl %edx,%ebp + xorl 56(%esp),%ebx + xorl %edi,%ebp + xorl 16(%esp),%ebx + xorl %esi,%ebp + xorl 36(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,48(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 52(%esp),%eax + addl %ebp,%ebx + # 20_39 77 + movl %ecx,%ebp + xorl 60(%esp),%eax + xorl %edx,%ebp + xorl 20(%esp),%eax + xorl %edi,%ebp + xorl 40(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + leal 3395469782(%eax,%esi,1),%eax + movl 56(%esp),%esi + addl %ebp,%eax + # 20_39 78 + movl %ebx,%ebp + xorl (%esp),%esi + xorl %ecx,%ebp + xorl 24(%esp),%esi + xorl %edx,%ebp + xorl 44(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + leal 3395469782(%esi,%edi,1),%esi + movl 60(%esp),%edi + addl %ebp,%esi + # 20_39 79 + movl %eax,%ebp + xorl 4(%esp),%edi + xorl %ebx,%ebp + xorl 28(%esp),%edi + xorl %ecx,%ebp + xorl 48(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + leal 3395469782(%edi,%edx,1),%edi + addl %ebp,%edi + movl 96(%esp),%ebp + movl 100(%esp),%edx + addl (%ebp),%edi + addl 4(%ebp),%esi + addl 8(%ebp),%eax + addl 12(%ebp),%ebx + addl 16(%ebp),%ecx + movl %edi,(%ebp) + addl $64,%edx + movl %esi,4(%ebp) + cmpl 104(%esp),%edx + movl %eax,8(%ebp) + movl %ecx,%edi + movl %ebx,12(%ebp) + movl %edx,%esi + movl %ecx,16(%ebp) + jb L002loop + addl $76,%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.type __sha1_block_data_order_shaext,@function +.align 4 +__sha1_block_data_order_shaext: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L003pic_point +L003pic_point: + popl %ebp + leal LK_XX_XX-L003pic_point(%ebp),%ebp +Lshaext_shortcut: + movl 20(%esp),%edi + movl %esp,%ebx + movl 24(%esp),%esi + movl 28(%esp),%ecx + subl $32,%esp + movdqu (%edi),%xmm0 + movd 16(%edi),%xmm1 + andl $-32,%esp + movdqa 80(%ebp),%xmm3 + movdqu (%esi),%xmm4 + pshufd $27,%xmm0,%xmm0 + movdqu 16(%esi),%xmm5 + pshufd $27,%xmm1,%xmm1 + movdqu 32(%esi),%xmm6 +.byte 102,15,56,0,227 + movdqu 48(%esi),%xmm7 +.byte 
102,15,56,0,235 +.byte 102,15,56,0,243 +.byte 102,15,56,0,251 + jmp L004loop_shaext +.align 4,0x90 +L004loop_shaext: + decl %ecx + leal 64(%esi),%eax + movdqa %xmm1,(%esp) + paddd %xmm4,%xmm1 + cmovnel %eax,%esi + movdqa %xmm0,16(%esp) +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 + movdqu (%esi),%xmm4 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,213 + movdqu 16(%esi),%xmm5 +.byte 102,15,56,0,227 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,206 + movdqu 32(%esi),%xmm6 +.byte 102,15,56,0,235 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,215 + movdqu 48(%esi),%xmm7 +.byte 102,15,56,0,243 + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 + movdqa (%esp),%xmm2 +.byte 102,15,56,0,251 +.byte 15,56,200,202 + paddd 16(%esp),%xmm0 + jnz L004loop_shaext + pshufd $27,%xmm0,%xmm0 + pshufd $27,%xmm1,%xmm1 + movdqu %xmm0,(%edi) + movd %xmm1,16(%edi) + movl %ebx,%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.type __sha1_block_data_order_ssse3,@function +.align 4 +__sha1_block_data_order_ssse3: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L005pic_point +L005pic_point: + popl %ebp + leal LK_XX_XX-L005pic_point(%ebp),%ebp +Lssse3_shortcut: + movdqa (%ebp),%xmm7 + movdqa 16(%ebp),%xmm0 + movdqa 32(%ebp),%xmm1 + movdqa 48(%ebp),%xmm2 + movdqa 64(%ebp),%xmm6 + movl 20(%esp),%edi + movl 24(%esp),%ebp + movl 28(%esp),%edx + movl %esp,%esi + subl $208,%esp + andl $-64,%esp + movdqa %xmm0,112(%esp) + movdqa %xmm1,128(%esp) + movdqa %xmm2,144(%esp) + shll $6,%edx + movdqa %xmm7,160(%esp) + addl %ebp,%edx + movdqa 
%xmm6,176(%esp) + addl $64,%ebp + movl %edi,192(%esp) + movl %ebp,196(%esp) + movl %edx,200(%esp) + movl %esi,204(%esp) + movl (%edi),%eax + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx + movl 16(%edi),%edi + movl %ebx,%esi + movdqu -64(%ebp),%xmm0 + movdqu -48(%ebp),%xmm1 + movdqu -32(%ebp),%xmm2 + movdqu -16(%ebp),%xmm3 +.byte 102,15,56,0,198 +.byte 102,15,56,0,206 +.byte 102,15,56,0,214 + movdqa %xmm7,96(%esp) +.byte 102,15,56,0,222 + paddd %xmm7,%xmm0 + paddd %xmm7,%xmm1 + paddd %xmm7,%xmm2 + movdqa %xmm0,(%esp) + psubd %xmm7,%xmm0 + movdqa %xmm1,16(%esp) + psubd %xmm7,%xmm1 + movdqa %xmm2,32(%esp) + movl %ecx,%ebp + psubd %xmm7,%xmm2 + xorl %edx,%ebp + pshufd $238,%xmm0,%xmm4 + andl %ebp,%esi + jmp L006loop +.align 4,0x90 +L006loop: + rorl $2,%ebx + xorl %edx,%esi + movl %eax,%ebp + punpcklqdq %xmm1,%xmm4 + movdqa %xmm3,%xmm6 + addl (%esp),%edi + xorl %ecx,%ebx + paddd %xmm3,%xmm7 + movdqa %xmm0,64(%esp) + roll $5,%eax + addl %esi,%edi + psrldq $4,%xmm6 + andl %ebx,%ebp + xorl %ecx,%ebx + pxor %xmm0,%xmm4 + addl %eax,%edi + rorl $7,%eax + pxor %xmm2,%xmm6 + xorl %ecx,%ebp + movl %edi,%esi + addl 4(%esp),%edx + pxor %xmm6,%xmm4 + xorl %ebx,%eax + roll $5,%edi + movdqa %xmm7,48(%esp) + addl %ebp,%edx + andl %eax,%esi + movdqa %xmm4,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + rorl $7,%edi + movdqa %xmm4,%xmm6 + xorl %ebx,%esi + pslldq $12,%xmm0 + paddd %xmm4,%xmm4 + movl %edx,%ebp + addl 8(%esp),%ecx + psrld $31,%xmm6 + xorl %eax,%edi + roll $5,%edx + movdqa %xmm0,%xmm7 + addl %esi,%ecx + andl %edi,%ebp + xorl %eax,%edi + psrld $30,%xmm0 + addl %edx,%ecx + rorl $7,%edx + por %xmm6,%xmm4 + xorl %eax,%ebp + movl %ecx,%esi + addl 12(%esp),%ebx + pslld $2,%xmm7 + xorl %edi,%edx + roll $5,%ecx + pxor %xmm0,%xmm4 + movdqa 96(%esp),%xmm0 + addl %ebp,%ebx + andl %edx,%esi + pxor %xmm7,%xmm4 + pshufd $238,%xmm1,%xmm5 + xorl %edi,%edx + addl %ecx,%ebx + rorl $7,%ecx + xorl %edi,%esi + movl %ebx,%ebp + punpcklqdq %xmm2,%xmm5 + movdqa %xmm4,%xmm7 + addl 16(%esp),%eax + xorl %edx,%ecx + paddd %xmm4,%xmm0 + movdqa %xmm1,80(%esp) + roll $5,%ebx + addl %esi,%eax + psrldq $4,%xmm7 + andl %ecx,%ebp + xorl %edx,%ecx + pxor %xmm1,%xmm5 + addl %ebx,%eax + rorl $7,%ebx + pxor %xmm3,%xmm7 + xorl %edx,%ebp + movl %eax,%esi + addl 20(%esp),%edi + pxor %xmm7,%xmm5 + xorl %ecx,%ebx + roll $5,%eax + movdqa %xmm0,(%esp) + addl %ebp,%edi + andl %ebx,%esi + movdqa %xmm5,%xmm1 + xorl %ecx,%ebx + addl %eax,%edi + rorl $7,%eax + movdqa %xmm5,%xmm7 + xorl %ecx,%esi + pslldq $12,%xmm1 + paddd %xmm5,%xmm5 + movl %edi,%ebp + addl 24(%esp),%edx + psrld $31,%xmm7 + xorl %ebx,%eax + roll $5,%edi + movdqa %xmm1,%xmm0 + addl %esi,%edx + andl %eax,%ebp + xorl %ebx,%eax + psrld $30,%xmm1 + addl %edi,%edx + rorl $7,%edi + por %xmm7,%xmm5 + xorl %ebx,%ebp + movl %edx,%esi + addl 28(%esp),%ecx + pslld $2,%xmm0 + xorl %eax,%edi + roll $5,%edx + pxor %xmm1,%xmm5 + movdqa 112(%esp),%xmm1 + addl %ebp,%ecx + andl %edi,%esi + pxor %xmm0,%xmm5 + pshufd $238,%xmm2,%xmm6 + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%edx + xorl %eax,%esi + movl %ecx,%ebp + punpcklqdq %xmm3,%xmm6 + movdqa %xmm5,%xmm0 + addl 32(%esp),%ebx + xorl %edi,%edx + paddd %xmm5,%xmm1 + movdqa %xmm2,96(%esp) + roll $5,%ecx + addl %esi,%ebx + psrldq $4,%xmm0 + andl %edx,%ebp + xorl %edi,%edx + pxor %xmm2,%xmm6 + addl %ecx,%ebx + rorl $7,%ecx + pxor %xmm4,%xmm0 + xorl %edi,%ebp + movl %ebx,%esi + addl 36(%esp),%eax + pxor %xmm0,%xmm6 + xorl %edx,%ecx + roll $5,%ebx + movdqa %xmm1,16(%esp) + addl %ebp,%eax + andl %ecx,%esi + movdqa %xmm6,%xmm2 + xorl %edx,%ecx + 
addl %ebx,%eax + rorl $7,%ebx + movdqa %xmm6,%xmm0 + xorl %edx,%esi + pslldq $12,%xmm2 + paddd %xmm6,%xmm6 + movl %eax,%ebp + addl 40(%esp),%edi + psrld $31,%xmm0 + xorl %ecx,%ebx + roll $5,%eax + movdqa %xmm2,%xmm1 + addl %esi,%edi + andl %ebx,%ebp + xorl %ecx,%ebx + psrld $30,%xmm2 + addl %eax,%edi + rorl $7,%eax + por %xmm0,%xmm6 + xorl %ecx,%ebp + movdqa 64(%esp),%xmm0 + movl %edi,%esi + addl 44(%esp),%edx + pslld $2,%xmm1 + xorl %ebx,%eax + roll $5,%edi + pxor %xmm2,%xmm6 + movdqa 112(%esp),%xmm2 + addl %ebp,%edx + andl %eax,%esi + pxor %xmm1,%xmm6 + pshufd $238,%xmm3,%xmm7 + xorl %ebx,%eax + addl %edi,%edx + rorl $7,%edi + xorl %ebx,%esi + movl %edx,%ebp + punpcklqdq %xmm4,%xmm7 + movdqa %xmm6,%xmm1 + addl 48(%esp),%ecx + xorl %eax,%edi + paddd %xmm6,%xmm2 + movdqa %xmm3,64(%esp) + roll $5,%edx + addl %esi,%ecx + psrldq $4,%xmm1 + andl %edi,%ebp + xorl %eax,%edi + pxor %xmm3,%xmm7 + addl %edx,%ecx + rorl $7,%edx + pxor %xmm5,%xmm1 + xorl %eax,%ebp + movl %ecx,%esi + addl 52(%esp),%ebx + pxor %xmm1,%xmm7 + xorl %edi,%edx + roll $5,%ecx + movdqa %xmm2,32(%esp) + addl %ebp,%ebx + andl %edx,%esi + movdqa %xmm7,%xmm3 + xorl %edi,%edx + addl %ecx,%ebx + rorl $7,%ecx + movdqa %xmm7,%xmm1 + xorl %edi,%esi + pslldq $12,%xmm3 + paddd %xmm7,%xmm7 + movl %ebx,%ebp + addl 56(%esp),%eax + psrld $31,%xmm1 + xorl %edx,%ecx + roll $5,%ebx + movdqa %xmm3,%xmm2 + addl %esi,%eax + andl %ecx,%ebp + xorl %edx,%ecx + psrld $30,%xmm3 + addl %ebx,%eax + rorl $7,%ebx + por %xmm1,%xmm7 + xorl %edx,%ebp + movdqa 80(%esp),%xmm1 + movl %eax,%esi + addl 60(%esp),%edi + pslld $2,%xmm2 + xorl %ecx,%ebx + roll $5,%eax + pxor %xmm3,%xmm7 + movdqa 112(%esp),%xmm3 + addl %ebp,%edi + andl %ebx,%esi + pxor %xmm2,%xmm7 + pshufd $238,%xmm6,%xmm2 + xorl %ecx,%ebx + addl %eax,%edi + rorl $7,%eax + pxor %xmm4,%xmm0 + punpcklqdq %xmm7,%xmm2 + xorl %ecx,%esi + movl %edi,%ebp + addl (%esp),%edx + pxor %xmm1,%xmm0 + movdqa %xmm4,80(%esp) + xorl %ebx,%eax + roll $5,%edi + movdqa %xmm3,%xmm4 + addl %esi,%edx + paddd %xmm7,%xmm3 + andl %eax,%ebp + pxor %xmm2,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + rorl $7,%edi + xorl %ebx,%ebp + movdqa %xmm0,%xmm2 + movdqa %xmm3,48(%esp) + movl %edx,%esi + addl 4(%esp),%ecx + xorl %eax,%edi + roll $5,%edx + pslld $2,%xmm0 + addl %ebp,%ecx + andl %edi,%esi + psrld $30,%xmm2 + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%edx + xorl %eax,%esi + movl %ecx,%ebp + addl 8(%esp),%ebx + xorl %edi,%edx + roll $5,%ecx + por %xmm2,%xmm0 + addl %esi,%ebx + andl %edx,%ebp + movdqa 96(%esp),%xmm2 + xorl %edi,%edx + addl %ecx,%ebx + addl 12(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + pshufd $238,%xmm7,%xmm3 + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + addl %ebx,%eax + addl 16(%esp),%edi + pxor %xmm5,%xmm1 + punpcklqdq %xmm0,%xmm3 + xorl %ecx,%esi + movl %eax,%ebp + roll $5,%eax + pxor %xmm2,%xmm1 + movdqa %xmm5,96(%esp) + addl %esi,%edi + xorl %ecx,%ebp + movdqa %xmm4,%xmm5 + rorl $7,%ebx + paddd %xmm0,%xmm4 + addl %eax,%edi + pxor %xmm3,%xmm1 + addl 20(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + movdqa %xmm1,%xmm3 + movdqa %xmm4,(%esp) + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + pslld $2,%xmm1 + addl 24(%esp),%ecx + xorl %eax,%esi + psrld $30,%xmm3 + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi + addl %edx,%ecx + por %xmm3,%xmm1 + addl 28(%esp),%ebx + xorl %edi,%ebp + movdqa 64(%esp),%xmm3 + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + pshufd $238,%xmm0,%xmm4 + addl %ecx,%ebx + 
addl 32(%esp),%eax + pxor %xmm6,%xmm2 + punpcklqdq %xmm1,%xmm4 + xorl %edx,%esi + movl %ebx,%ebp + roll $5,%ebx + pxor %xmm3,%xmm2 + movdqa %xmm6,64(%esp) + addl %esi,%eax + xorl %edx,%ebp + movdqa 128(%esp),%xmm6 + rorl $7,%ecx + paddd %xmm1,%xmm5 + addl %ebx,%eax + pxor %xmm4,%xmm2 + addl 36(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + roll $5,%eax + movdqa %xmm2,%xmm4 + movdqa %xmm5,16(%esp) + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + pslld $2,%xmm2 + addl 40(%esp),%edx + xorl %ebx,%esi + psrld $30,%xmm4 + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax + addl %edi,%edx + por %xmm4,%xmm2 + addl 44(%esp),%ecx + xorl %eax,%ebp + movdqa 80(%esp),%xmm4 + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + pshufd $238,%xmm1,%xmm5 + addl %edx,%ecx + addl 48(%esp),%ebx + pxor %xmm7,%xmm3 + punpcklqdq %xmm2,%xmm5 + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + pxor %xmm4,%xmm3 + movdqa %xmm7,80(%esp) + addl %esi,%ebx + xorl %edi,%ebp + movdqa %xmm6,%xmm7 + rorl $7,%edx + paddd %xmm2,%xmm6 + addl %ecx,%ebx + pxor %xmm5,%xmm3 + addl 52(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + movdqa %xmm3,%xmm5 + movdqa %xmm6,32(%esp) + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + addl %ebx,%eax + pslld $2,%xmm3 + addl 56(%esp),%edi + xorl %ecx,%esi + psrld $30,%xmm5 + movl %eax,%ebp + roll $5,%eax + addl %esi,%edi + xorl %ecx,%ebp + rorl $7,%ebx + addl %eax,%edi + por %xmm5,%xmm3 + addl 60(%esp),%edx + xorl %ebx,%ebp + movdqa 96(%esp),%xmm5 + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + pshufd $238,%xmm2,%xmm6 + addl %edi,%edx + addl (%esp),%ecx + pxor %xmm0,%xmm4 + punpcklqdq %xmm3,%xmm6 + xorl %eax,%esi + movl %edx,%ebp + roll $5,%edx + pxor %xmm5,%xmm4 + movdqa %xmm0,96(%esp) + addl %esi,%ecx + xorl %eax,%ebp + movdqa %xmm7,%xmm0 + rorl $7,%edi + paddd %xmm3,%xmm7 + addl %edx,%ecx + pxor %xmm6,%xmm4 + addl 4(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + roll $5,%ecx + movdqa %xmm4,%xmm6 + movdqa %xmm7,48(%esp) + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + addl %ecx,%ebx + pslld $2,%xmm4 + addl 8(%esp),%eax + xorl %edx,%esi + psrld $30,%xmm6 + movl %ebx,%ebp + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + addl %ebx,%eax + por %xmm6,%xmm4 + addl 12(%esp),%edi + xorl %ecx,%ebp + movdqa 64(%esp),%xmm6 + movl %eax,%esi + roll $5,%eax + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + pshufd $238,%xmm3,%xmm7 + addl %eax,%edi + addl 16(%esp),%edx + pxor %xmm1,%xmm5 + punpcklqdq %xmm4,%xmm7 + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + pxor %xmm6,%xmm5 + movdqa %xmm1,64(%esp) + addl %esi,%edx + xorl %ebx,%ebp + movdqa %xmm0,%xmm1 + rorl $7,%eax + paddd %xmm4,%xmm0 + addl %edi,%edx + pxor %xmm7,%xmm5 + addl 20(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + roll $5,%edx + movdqa %xmm5,%xmm7 + movdqa %xmm0,(%esp) + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + addl %edx,%ecx + pslld $2,%xmm5 + addl 24(%esp),%ebx + xorl %edi,%esi + psrld $30,%xmm7 + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + por %xmm7,%xmm5 + addl 28(%esp),%eax + movdqa 80(%esp),%xmm7 + rorl $7,%ecx + movl %ebx,%esi + xorl %edx,%ebp + roll $5,%ebx + pshufd $238,%xmm4,%xmm0 + addl %ebp,%eax + xorl %ecx,%esi + xorl %edx,%ecx + addl %ebx,%eax + addl 32(%esp),%edi + pxor %xmm2,%xmm6 + punpcklqdq %xmm5,%xmm0 + andl %ecx,%esi + xorl %edx,%ecx + rorl $7,%ebx + pxor %xmm7,%xmm6 + movdqa %xmm2,80(%esp) + movl %eax,%ebp + xorl 
%ecx,%esi + roll $5,%eax + movdqa %xmm1,%xmm2 + addl %esi,%edi + paddd %xmm5,%xmm1 + xorl %ebx,%ebp + pxor %xmm0,%xmm6 + xorl %ecx,%ebx + addl %eax,%edi + addl 36(%esp),%edx + andl %ebx,%ebp + movdqa %xmm6,%xmm0 + movdqa %xmm1,16(%esp) + xorl %ecx,%ebx + rorl $7,%eax + movl %edi,%esi + xorl %ebx,%ebp + roll $5,%edi + pslld $2,%xmm6 + addl %ebp,%edx + xorl %eax,%esi + psrld $30,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + addl 40(%esp),%ecx + andl %eax,%esi + xorl %ebx,%eax + rorl $7,%edi + por %xmm0,%xmm6 + movl %edx,%ebp + xorl %eax,%esi + movdqa 96(%esp),%xmm0 + roll $5,%edx + addl %esi,%ecx + xorl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + pshufd $238,%xmm5,%xmm1 + addl 44(%esp),%ebx + andl %edi,%ebp + xorl %eax,%edi + rorl $7,%edx + movl %ecx,%esi + xorl %edi,%ebp + roll $5,%ecx + addl %ebp,%ebx + xorl %edx,%esi + xorl %edi,%edx + addl %ecx,%ebx + addl 48(%esp),%eax + pxor %xmm3,%xmm7 + punpcklqdq %xmm6,%xmm1 + andl %edx,%esi + xorl %edi,%edx + rorl $7,%ecx + pxor %xmm0,%xmm7 + movdqa %xmm3,96(%esp) + movl %ebx,%ebp + xorl %edx,%esi + roll $5,%ebx + movdqa 144(%esp),%xmm3 + addl %esi,%eax + paddd %xmm6,%xmm2 + xorl %ecx,%ebp + pxor %xmm1,%xmm7 + xorl %edx,%ecx + addl %ebx,%eax + addl 52(%esp),%edi + andl %ecx,%ebp + movdqa %xmm7,%xmm1 + movdqa %xmm2,32(%esp) + xorl %edx,%ecx + rorl $7,%ebx + movl %eax,%esi + xorl %ecx,%ebp + roll $5,%eax + pslld $2,%xmm7 + addl %ebp,%edi + xorl %ebx,%esi + psrld $30,%xmm1 + xorl %ecx,%ebx + addl %eax,%edi + addl 56(%esp),%edx + andl %ebx,%esi + xorl %ecx,%ebx + rorl $7,%eax + por %xmm1,%xmm7 + movl %edi,%ebp + xorl %ebx,%esi + movdqa 64(%esp),%xmm1 + roll $5,%edi + addl %esi,%edx + xorl %eax,%ebp + xorl %ebx,%eax + addl %edi,%edx + pshufd $238,%xmm6,%xmm2 + addl 60(%esp),%ecx + andl %eax,%ebp + xorl %ebx,%eax + rorl $7,%edi + movl %edx,%esi + xorl %eax,%ebp + roll $5,%edx + addl %ebp,%ecx + xorl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + addl (%esp),%ebx + pxor %xmm4,%xmm0 + punpcklqdq %xmm7,%xmm2 + andl %edi,%esi + xorl %eax,%edi + rorl $7,%edx + pxor %xmm1,%xmm0 + movdqa %xmm4,64(%esp) + movl %ecx,%ebp + xorl %edi,%esi + roll $5,%ecx + movdqa %xmm3,%xmm4 + addl %esi,%ebx + paddd %xmm7,%xmm3 + xorl %edx,%ebp + pxor %xmm2,%xmm0 + xorl %edi,%edx + addl %ecx,%ebx + addl 4(%esp),%eax + andl %edx,%ebp + movdqa %xmm0,%xmm2 + movdqa %xmm3,48(%esp) + xorl %edi,%edx + rorl $7,%ecx + movl %ebx,%esi + xorl %edx,%ebp + roll $5,%ebx + pslld $2,%xmm0 + addl %ebp,%eax + xorl %ecx,%esi + psrld $30,%xmm2 + xorl %edx,%ecx + addl %ebx,%eax + addl 8(%esp),%edi + andl %ecx,%esi + xorl %edx,%ecx + rorl $7,%ebx + por %xmm2,%xmm0 + movl %eax,%ebp + xorl %ecx,%esi + movdqa 80(%esp),%xmm2 + roll $5,%eax + addl %esi,%edi + xorl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + pshufd $238,%xmm7,%xmm3 + addl 12(%esp),%edx + andl %ebx,%ebp + xorl %ecx,%ebx + rorl $7,%eax + movl %edi,%esi + xorl %ebx,%ebp + roll $5,%edi + addl %ebp,%edx + xorl %eax,%esi + xorl %ebx,%eax + addl %edi,%edx + addl 16(%esp),%ecx + pxor %xmm5,%xmm1 + punpcklqdq %xmm0,%xmm3 + andl %eax,%esi + xorl %ebx,%eax + rorl $7,%edi + pxor %xmm2,%xmm1 + movdqa %xmm5,80(%esp) + movl %edx,%ebp + xorl %eax,%esi + roll $5,%edx + movdqa %xmm4,%xmm5 + addl %esi,%ecx + paddd %xmm0,%xmm4 + xorl %edi,%ebp + pxor %xmm3,%xmm1 + xorl %eax,%edi + addl %edx,%ecx + addl 20(%esp),%ebx + andl %edi,%ebp + movdqa %xmm1,%xmm3 + movdqa %xmm4,(%esp) + xorl %eax,%edi + rorl $7,%edx + movl %ecx,%esi + xorl %edi,%ebp + roll $5,%ecx + pslld $2,%xmm1 + addl %ebp,%ebx + xorl %edx,%esi + psrld $30,%xmm3 + xorl %edi,%edx + addl %ecx,%ebx + addl 
24(%esp),%eax + andl %edx,%esi + xorl %edi,%edx + rorl $7,%ecx + por %xmm3,%xmm1 + movl %ebx,%ebp + xorl %edx,%esi + movdqa 96(%esp),%xmm3 + roll $5,%ebx + addl %esi,%eax + xorl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + pshufd $238,%xmm0,%xmm4 + addl 28(%esp),%edi + andl %ecx,%ebp + xorl %edx,%ecx + rorl $7,%ebx + movl %eax,%esi + xorl %ecx,%ebp + roll $5,%eax + addl %ebp,%edi + xorl %ebx,%esi + xorl %ecx,%ebx + addl %eax,%edi + addl 32(%esp),%edx + pxor %xmm6,%xmm2 + punpcklqdq %xmm1,%xmm4 + andl %ebx,%esi + xorl %ecx,%ebx + rorl $7,%eax + pxor %xmm3,%xmm2 + movdqa %xmm6,96(%esp) + movl %edi,%ebp + xorl %ebx,%esi + roll $5,%edi + movdqa %xmm5,%xmm6 + addl %esi,%edx + paddd %xmm1,%xmm5 + xorl %eax,%ebp + pxor %xmm4,%xmm2 + xorl %ebx,%eax + addl %edi,%edx + addl 36(%esp),%ecx + andl %eax,%ebp + movdqa %xmm2,%xmm4 + movdqa %xmm5,16(%esp) + xorl %ebx,%eax + rorl $7,%edi + movl %edx,%esi + xorl %eax,%ebp + roll $5,%edx + pslld $2,%xmm2 + addl %ebp,%ecx + xorl %edi,%esi + psrld $30,%xmm4 + xorl %eax,%edi + addl %edx,%ecx + addl 40(%esp),%ebx + andl %edi,%esi + xorl %eax,%edi + rorl $7,%edx + por %xmm4,%xmm2 + movl %ecx,%ebp + xorl %edi,%esi + movdqa 64(%esp),%xmm4 + roll $5,%ecx + addl %esi,%ebx + xorl %edx,%ebp + xorl %edi,%edx + addl %ecx,%ebx + pshufd $238,%xmm1,%xmm5 + addl 44(%esp),%eax + andl %edx,%ebp + xorl %edi,%edx + rorl $7,%ecx + movl %ebx,%esi + xorl %edx,%ebp + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + addl %ebx,%eax + addl 48(%esp),%edi + pxor %xmm7,%xmm3 + punpcklqdq %xmm2,%xmm5 + xorl %ecx,%esi + movl %eax,%ebp + roll $5,%eax + pxor %xmm4,%xmm3 + movdqa %xmm7,64(%esp) + addl %esi,%edi + xorl %ecx,%ebp + movdqa %xmm6,%xmm7 + rorl $7,%ebx + paddd %xmm2,%xmm6 + addl %eax,%edi + pxor %xmm5,%xmm3 + addl 52(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + movdqa %xmm3,%xmm5 + movdqa %xmm6,32(%esp) + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + pslld $2,%xmm3 + addl 56(%esp),%ecx + xorl %eax,%esi + psrld $30,%xmm5 + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi + addl %edx,%ecx + por %xmm5,%xmm3 + addl 60(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + addl %ecx,%ebx + addl (%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + paddd %xmm3,%xmm7 + addl %ebx,%eax + addl 4(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + movdqa %xmm7,48(%esp) + roll $5,%eax + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + addl 8(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax + addl %edi,%edx + addl 12(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + addl %edx,%ecx + movl 196(%esp),%ebp + cmpl 200(%esp),%ebp + je L007done + movdqa 160(%esp),%xmm7 + movdqa 176(%esp),%xmm6 + movdqu (%ebp),%xmm0 + movdqu 16(%ebp),%xmm1 + movdqu 32(%ebp),%xmm2 + movdqu 48(%ebp),%xmm3 + addl $64,%ebp +.byte 102,15,56,0,198 + movl %ebp,196(%esp) + movdqa %xmm7,96(%esp) + addl 16(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx +.byte 102,15,56,0,206 + addl %ecx,%ebx + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + paddd %xmm7,%xmm0 + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + movdqa %xmm0,(%esp) + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + psubd %xmm7,%xmm0 + roll $5,%eax + addl 
%esi,%edi + xorl %ecx,%ebp + rorl $7,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi +.byte 102,15,56,0,214 + addl %edx,%ecx + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + paddd %xmm7,%xmm1 + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + movdqa %xmm1,16(%esp) + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + psubd %xmm7,%xmm1 + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + roll $5,%eax + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax +.byte 102,15,56,0,222 + addl %edi,%edx + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + paddd %xmm7,%xmm2 + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + movdqa %xmm2,32(%esp) + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + psubd %xmm7,%xmm2 + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + rorl $7,%ecx + addl %ebx,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %ecx,%ebx + movl %edx,12(%ebp) + xorl %edx,%ebx + movl %edi,16(%ebp) + movl %esi,%ebp + pshufd $238,%xmm0,%xmm4 + andl %ebx,%esi + movl %ebp,%ebx + jmp L006loop +.align 4,0x90 +L007done: + addl 16(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + xorl %edx,%esi + rorl $7,%ecx + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + roll $5,%eax + addl %esi,%edi + xorl %ecx,%ebp + rorl $7,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %eax,%ebp + rorl $7,%edi + addl %edx,%ecx + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %edi,%esi + rorl $7,%edx + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + roll $5,%ebx + addl %esi,%eax + xorl %edx,%ebp + rorl $7,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + roll $5,%eax + addl %ebp,%edi + xorl %ecx,%esi + rorl $7,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ebx,%ebp + rorl $7,%eax + addl %edi,%edx + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %eax,%esi + rorl $7,%edi + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %edi,%ebp + rorl $7,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + rorl $7,%ecx + addl %ebx,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + movl 204(%esp),%esp + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + 
addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %edx,12(%ebp) + movl %edi,16(%ebp) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.type __sha1_block_data_order_avx,@function +.align 4 +__sha1_block_data_order_avx: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call L008pic_point +L008pic_point: + popl %ebp + leal LK_XX_XX-L008pic_point(%ebp),%ebp +Lavx_shortcut: + vzeroall + vmovdqa (%ebp),%xmm7 + vmovdqa 16(%ebp),%xmm0 + vmovdqa 32(%ebp),%xmm1 + vmovdqa 48(%ebp),%xmm2 + vmovdqa 64(%ebp),%xmm6 + movl 20(%esp),%edi + movl 24(%esp),%ebp + movl 28(%esp),%edx + movl %esp,%esi + subl $208,%esp + andl $-64,%esp + vmovdqa %xmm0,112(%esp) + vmovdqa %xmm1,128(%esp) + vmovdqa %xmm2,144(%esp) + shll $6,%edx + vmovdqa %xmm7,160(%esp) + addl %ebp,%edx + vmovdqa %xmm6,176(%esp) + addl $64,%ebp + movl %edi,192(%esp) + movl %ebp,196(%esp) + movl %edx,200(%esp) + movl %esi,204(%esp) + movl (%edi),%eax + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx + movl 16(%edi),%edi + movl %ebx,%esi + vmovdqu -64(%ebp),%xmm0 + vmovdqu -48(%ebp),%xmm1 + vmovdqu -32(%ebp),%xmm2 + vmovdqu -16(%ebp),%xmm3 + vpshufb %xmm6,%xmm0,%xmm0 + vpshufb %xmm6,%xmm1,%xmm1 + vpshufb %xmm6,%xmm2,%xmm2 + vmovdqa %xmm7,96(%esp) + vpshufb %xmm6,%xmm3,%xmm3 + vpaddd %xmm7,%xmm0,%xmm4 + vpaddd %xmm7,%xmm1,%xmm5 + vpaddd %xmm7,%xmm2,%xmm6 + vmovdqa %xmm4,(%esp) + movl %ecx,%ebp + vmovdqa %xmm5,16(%esp) + xorl %edx,%ebp + vmovdqa %xmm6,32(%esp) + andl %ebp,%esi + jmp L009loop +.align 4,0x90 +L009loop: + shrdl $2,%ebx,%ebx + xorl %edx,%esi + vpalignr $8,%xmm0,%xmm1,%xmm4 + movl %eax,%ebp + addl (%esp),%edi + vpaddd %xmm3,%xmm7,%xmm7 + vmovdqa %xmm0,64(%esp) + xorl %ecx,%ebx + shldl $5,%eax,%eax + vpsrldq $4,%xmm3,%xmm6 + addl %esi,%edi + andl %ebx,%ebp + vpxor %xmm0,%xmm4,%xmm4 + xorl %ecx,%ebx + addl %eax,%edi + vpxor %xmm2,%xmm6,%xmm6 + shrdl $7,%eax,%eax + xorl %ecx,%ebp + vmovdqa %xmm7,48(%esp) + movl %edi,%esi + addl 4(%esp),%edx + vpxor %xmm6,%xmm4,%xmm4 + xorl %ebx,%eax + shldl $5,%edi,%edi + addl %ebp,%edx + andl %eax,%esi + vpsrld $31,%xmm4,%xmm6 + xorl %ebx,%eax + addl %edi,%edx + shrdl $7,%edi,%edi + xorl %ebx,%esi + vpslldq $12,%xmm4,%xmm0 + vpaddd %xmm4,%xmm4,%xmm4 + movl %edx,%ebp + addl 8(%esp),%ecx + xorl %eax,%edi + shldl $5,%edx,%edx + vpsrld $30,%xmm0,%xmm7 + vpor %xmm6,%xmm4,%xmm4 + addl %esi,%ecx + andl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + vpslld $2,%xmm0,%xmm0 + shrdl $7,%edx,%edx + xorl %eax,%ebp + vpxor %xmm7,%xmm4,%xmm4 + movl %ecx,%esi + addl 12(%esp),%ebx + xorl %edi,%edx + shldl $5,%ecx,%ecx + vpxor %xmm0,%xmm4,%xmm4 + addl %ebp,%ebx + andl %edx,%esi + vmovdqa 96(%esp),%xmm0 + xorl %edi,%edx + addl %ecx,%ebx + shrdl $7,%ecx,%ecx + xorl %edi,%esi + vpalignr $8,%xmm1,%xmm2,%xmm5 + movl %ebx,%ebp + addl 16(%esp),%eax + vpaddd %xmm4,%xmm0,%xmm0 + vmovdqa %xmm1,80(%esp) + xorl %edx,%ecx + shldl $5,%ebx,%ebx + vpsrldq $4,%xmm4,%xmm7 + addl %esi,%eax + andl %ecx,%ebp + vpxor %xmm1,%xmm5,%xmm5 + xorl %edx,%ecx + addl %ebx,%eax + vpxor %xmm3,%xmm7,%xmm7 + shrdl $7,%ebx,%ebx + xorl %edx,%ebp + vmovdqa %xmm0,(%esp) + movl %eax,%esi + addl 20(%esp),%edi + vpxor %xmm7,%xmm5,%xmm5 + xorl %ecx,%ebx + shldl $5,%eax,%eax + addl %ebp,%edi + andl %ebx,%esi + vpsrld $31,%xmm5,%xmm7 + xorl %ecx,%ebx + addl %eax,%edi + shrdl $7,%eax,%eax + xorl %ecx,%esi + vpslldq $12,%xmm5,%xmm1 + vpaddd %xmm5,%xmm5,%xmm5 + movl %edi,%ebp + addl 24(%esp),%edx + xorl %ebx,%eax + shldl $5,%edi,%edi + vpsrld $30,%xmm1,%xmm0 + vpor %xmm7,%xmm5,%xmm5 + addl %esi,%edx + andl %eax,%ebp + 
xorl %ebx,%eax + addl %edi,%edx + vpslld $2,%xmm1,%xmm1 + shrdl $7,%edi,%edi + xorl %ebx,%ebp + vpxor %xmm0,%xmm5,%xmm5 + movl %edx,%esi + addl 28(%esp),%ecx + xorl %eax,%edi + shldl $5,%edx,%edx + vpxor %xmm1,%xmm5,%xmm5 + addl %ebp,%ecx + andl %edi,%esi + vmovdqa 112(%esp),%xmm1 + xorl %eax,%edi + addl %edx,%ecx + shrdl $7,%edx,%edx + xorl %eax,%esi + vpalignr $8,%xmm2,%xmm3,%xmm6 + movl %ecx,%ebp + addl 32(%esp),%ebx + vpaddd %xmm5,%xmm1,%xmm1 + vmovdqa %xmm2,96(%esp) + xorl %edi,%edx + shldl $5,%ecx,%ecx + vpsrldq $4,%xmm5,%xmm0 + addl %esi,%ebx + andl %edx,%ebp + vpxor %xmm2,%xmm6,%xmm6 + xorl %edi,%edx + addl %ecx,%ebx + vpxor %xmm4,%xmm0,%xmm0 + shrdl $7,%ecx,%ecx + xorl %edi,%ebp + vmovdqa %xmm1,16(%esp) + movl %ebx,%esi + addl 36(%esp),%eax + vpxor %xmm0,%xmm6,%xmm6 + xorl %edx,%ecx + shldl $5,%ebx,%ebx + addl %ebp,%eax + andl %ecx,%esi + vpsrld $31,%xmm6,%xmm0 + xorl %edx,%ecx + addl %ebx,%eax + shrdl $7,%ebx,%ebx + xorl %edx,%esi + vpslldq $12,%xmm6,%xmm2 + vpaddd %xmm6,%xmm6,%xmm6 + movl %eax,%ebp + addl 40(%esp),%edi + xorl %ecx,%ebx + shldl $5,%eax,%eax + vpsrld $30,%xmm2,%xmm1 + vpor %xmm0,%xmm6,%xmm6 + addl %esi,%edi + andl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + vpslld $2,%xmm2,%xmm2 + vmovdqa 64(%esp),%xmm0 + shrdl $7,%eax,%eax + xorl %ecx,%ebp + vpxor %xmm1,%xmm6,%xmm6 + movl %edi,%esi + addl 44(%esp),%edx + xorl %ebx,%eax + shldl $5,%edi,%edi + vpxor %xmm2,%xmm6,%xmm6 + addl %ebp,%edx + andl %eax,%esi + vmovdqa 112(%esp),%xmm2 + xorl %ebx,%eax + addl %edi,%edx + shrdl $7,%edi,%edi + xorl %ebx,%esi + vpalignr $8,%xmm3,%xmm4,%xmm7 + movl %edx,%ebp + addl 48(%esp),%ecx + vpaddd %xmm6,%xmm2,%xmm2 + vmovdqa %xmm3,64(%esp) + xorl %eax,%edi + shldl $5,%edx,%edx + vpsrldq $4,%xmm6,%xmm1 + addl %esi,%ecx + andl %edi,%ebp + vpxor %xmm3,%xmm7,%xmm7 + xorl %eax,%edi + addl %edx,%ecx + vpxor %xmm5,%xmm1,%xmm1 + shrdl $7,%edx,%edx + xorl %eax,%ebp + vmovdqa %xmm2,32(%esp) + movl %ecx,%esi + addl 52(%esp),%ebx + vpxor %xmm1,%xmm7,%xmm7 + xorl %edi,%edx + shldl $5,%ecx,%ecx + addl %ebp,%ebx + andl %edx,%esi + vpsrld $31,%xmm7,%xmm1 + xorl %edi,%edx + addl %ecx,%ebx + shrdl $7,%ecx,%ecx + xorl %edi,%esi + vpslldq $12,%xmm7,%xmm3 + vpaddd %xmm7,%xmm7,%xmm7 + movl %ebx,%ebp + addl 56(%esp),%eax + xorl %edx,%ecx + shldl $5,%ebx,%ebx + vpsrld $30,%xmm3,%xmm2 + vpor %xmm1,%xmm7,%xmm7 + addl %esi,%eax + andl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + vpslld $2,%xmm3,%xmm3 + vmovdqa 80(%esp),%xmm1 + shrdl $7,%ebx,%ebx + xorl %edx,%ebp + vpxor %xmm2,%xmm7,%xmm7 + movl %eax,%esi + addl 60(%esp),%edi + xorl %ecx,%ebx + shldl $5,%eax,%eax + vpxor %xmm3,%xmm7,%xmm7 + addl %ebp,%edi + andl %ebx,%esi + vmovdqa 112(%esp),%xmm3 + xorl %ecx,%ebx + addl %eax,%edi + vpalignr $8,%xmm6,%xmm7,%xmm2 + vpxor %xmm4,%xmm0,%xmm0 + shrdl $7,%eax,%eax + xorl %ecx,%esi + movl %edi,%ebp + addl (%esp),%edx + vpxor %xmm1,%xmm0,%xmm0 + vmovdqa %xmm4,80(%esp) + xorl %ebx,%eax + shldl $5,%edi,%edi + vmovdqa %xmm3,%xmm4 + vpaddd %xmm7,%xmm3,%xmm3 + addl %esi,%edx + andl %eax,%ebp + vpxor %xmm2,%xmm0,%xmm0 + xorl %ebx,%eax + addl %edi,%edx + shrdl $7,%edi,%edi + xorl %ebx,%ebp + vpsrld $30,%xmm0,%xmm2 + vmovdqa %xmm3,48(%esp) + movl %edx,%esi + addl 4(%esp),%ecx + xorl %eax,%edi + shldl $5,%edx,%edx + vpslld $2,%xmm0,%xmm0 + addl %ebp,%ecx + andl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + shrdl $7,%edx,%edx + xorl %eax,%esi + movl %ecx,%ebp + addl 8(%esp),%ebx + vpor %xmm2,%xmm0,%xmm0 + xorl %edi,%edx + shldl $5,%ecx,%ecx + vmovdqa 96(%esp),%xmm2 + addl %esi,%ebx + andl %edx,%ebp + xorl %edi,%edx + addl 
%ecx,%ebx + addl 12(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vpalignr $8,%xmm7,%xmm0,%xmm3 + vpxor %xmm5,%xmm1,%xmm1 + addl 16(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + vpxor %xmm2,%xmm1,%xmm1 + vmovdqa %xmm5,96(%esp) + addl %esi,%edi + xorl %ecx,%ebp + vmovdqa %xmm4,%xmm5 + vpaddd %xmm0,%xmm4,%xmm4 + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpxor %xmm3,%xmm1,%xmm1 + addl 20(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + vpsrld $30,%xmm1,%xmm3 + vmovdqa %xmm4,(%esp) + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + vpslld $2,%xmm1,%xmm1 + addl 24(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + vpor %xmm3,%xmm1,%xmm1 + addl 28(%esp),%ebx + xorl %edi,%ebp + vmovdqa 64(%esp),%xmm3 + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpalignr $8,%xmm0,%xmm1,%xmm4 + vpxor %xmm6,%xmm2,%xmm2 + addl 32(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + vpxor %xmm3,%xmm2,%xmm2 + vmovdqa %xmm6,64(%esp) + addl %esi,%eax + xorl %edx,%ebp + vmovdqa 128(%esp),%xmm6 + vpaddd %xmm1,%xmm5,%xmm5 + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vpxor %xmm4,%xmm2,%xmm2 + addl 36(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + vpsrld $30,%xmm2,%xmm4 + vmovdqa %xmm5,16(%esp) + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpslld $2,%xmm2,%xmm2 + addl 40(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + addl %esi,%edx + xorl %ebx,%ebp + shrdl $7,%eax,%eax + addl %edi,%edx + vpor %xmm4,%xmm2,%xmm2 + addl 44(%esp),%ecx + xorl %eax,%ebp + vmovdqa 80(%esp),%xmm4 + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + vpalignr $8,%xmm1,%xmm2,%xmm5 + vpxor %xmm7,%xmm3,%xmm3 + addl 48(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + vpxor %xmm4,%xmm3,%xmm3 + vmovdqa %xmm7,80(%esp) + addl %esi,%ebx + xorl %edi,%ebp + vmovdqa %xmm6,%xmm7 + vpaddd %xmm2,%xmm6,%xmm6 + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpxor %xmm5,%xmm3,%xmm3 + addl 52(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + vpsrld $30,%xmm3,%xmm5 + vmovdqa %xmm6,32(%esp) + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vpslld $2,%xmm3,%xmm3 + addl 56(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ecx,%ebp + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpor %xmm5,%xmm3,%xmm3 + addl 60(%esp),%edx + xorl %ebx,%ebp + vmovdqa 96(%esp),%xmm5 + movl %edi,%esi + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + vpalignr $8,%xmm2,%xmm3,%xmm6 + vpxor %xmm0,%xmm4,%xmm4 + addl (%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + vpxor %xmm5,%xmm4,%xmm4 + vmovdqa %xmm0,96(%esp) + addl %esi,%ecx + xorl %eax,%ebp + vmovdqa %xmm7,%xmm0 + vpaddd %xmm3,%xmm7,%xmm7 + shrdl $7,%edi,%edi + addl %edx,%ecx + vpxor %xmm6,%xmm4,%xmm4 + addl 4(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + vpsrld $30,%xmm4,%xmm6 + vmovdqa %xmm7,48(%esp) + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpslld $2,%xmm4,%xmm4 + addl 8(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl 
%ebx,%eax + vpor %xmm6,%xmm4,%xmm4 + addl 12(%esp),%edi + xorl %ecx,%ebp + vmovdqa 64(%esp),%xmm6 + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpalignr $8,%xmm3,%xmm4,%xmm7 + vpxor %xmm1,%xmm5,%xmm5 + addl 16(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + vpxor %xmm6,%xmm5,%xmm5 + vmovdqa %xmm1,64(%esp) + addl %esi,%edx + xorl %ebx,%ebp + vmovdqa %xmm0,%xmm1 + vpaddd %xmm4,%xmm0,%xmm0 + shrdl $7,%eax,%eax + addl %edi,%edx + vpxor %xmm7,%xmm5,%xmm5 + addl 20(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + vpsrld $30,%xmm5,%xmm7 + vmovdqa %xmm0,(%esp) + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + vpslld $2,%xmm5,%xmm5 + addl 24(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + vpor %xmm7,%xmm5,%xmm5 + addl 28(%esp),%eax + vmovdqa 80(%esp),%xmm7 + shrdl $7,%ecx,%ecx + movl %ebx,%esi + xorl %edx,%ebp + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %ecx,%esi + xorl %edx,%ecx + addl %ebx,%eax + vpalignr $8,%xmm4,%xmm5,%xmm0 + vpxor %xmm2,%xmm6,%xmm6 + addl 32(%esp),%edi + andl %ecx,%esi + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + vpxor %xmm7,%xmm6,%xmm6 + vmovdqa %xmm2,80(%esp) + movl %eax,%ebp + xorl %ecx,%esi + vmovdqa %xmm1,%xmm2 + vpaddd %xmm5,%xmm1,%xmm1 + shldl $5,%eax,%eax + addl %esi,%edi + vpxor %xmm0,%xmm6,%xmm6 + xorl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + addl 36(%esp),%edx + vpsrld $30,%xmm6,%xmm0 + vmovdqa %xmm1,16(%esp) + andl %ebx,%ebp + xorl %ecx,%ebx + shrdl $7,%eax,%eax + movl %edi,%esi + vpslld $2,%xmm6,%xmm6 + xorl %ebx,%ebp + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %eax,%esi + xorl %ebx,%eax + addl %edi,%edx + addl 40(%esp),%ecx + andl %eax,%esi + vpor %xmm0,%xmm6,%xmm6 + xorl %ebx,%eax + shrdl $7,%edi,%edi + vmovdqa 96(%esp),%xmm0 + movl %edx,%ebp + xorl %eax,%esi + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + addl 44(%esp),%ebx + andl %edi,%ebp + xorl %eax,%edi + shrdl $7,%edx,%edx + movl %ecx,%esi + xorl %edi,%ebp + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edx,%esi + xorl %edi,%edx + addl %ecx,%ebx + vpalignr $8,%xmm5,%xmm6,%xmm1 + vpxor %xmm3,%xmm7,%xmm7 + addl 48(%esp),%eax + andl %edx,%esi + xorl %edi,%edx + shrdl $7,%ecx,%ecx + vpxor %xmm0,%xmm7,%xmm7 + vmovdqa %xmm3,96(%esp) + movl %ebx,%ebp + xorl %edx,%esi + vmovdqa 144(%esp),%xmm3 + vpaddd %xmm6,%xmm2,%xmm2 + shldl $5,%ebx,%ebx + addl %esi,%eax + vpxor %xmm1,%xmm7,%xmm7 + xorl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + addl 52(%esp),%edi + vpsrld $30,%xmm7,%xmm1 + vmovdqa %xmm2,32(%esp) + andl %ecx,%ebp + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + movl %eax,%esi + vpslld $2,%xmm7,%xmm7 + xorl %ecx,%ebp + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ebx,%esi + xorl %ecx,%ebx + addl %eax,%edi + addl 56(%esp),%edx + andl %ebx,%esi + vpor %xmm1,%xmm7,%xmm7 + xorl %ecx,%ebx + shrdl $7,%eax,%eax + vmovdqa 64(%esp),%xmm1 + movl %edi,%ebp + xorl %ebx,%esi + shldl $5,%edi,%edi + addl %esi,%edx + xorl %eax,%ebp + xorl %ebx,%eax + addl %edi,%edx + addl 60(%esp),%ecx + andl %eax,%ebp + xorl %ebx,%eax + shrdl $7,%edi,%edi + movl %edx,%esi + xorl %eax,%ebp + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + vpalignr $8,%xmm6,%xmm7,%xmm2 + vpxor %xmm4,%xmm0,%xmm0 + addl (%esp),%ebx + andl %edi,%esi + xorl %eax,%edi + shrdl $7,%edx,%edx + vpxor %xmm1,%xmm0,%xmm0 + vmovdqa %xmm4,64(%esp) + movl %ecx,%ebp + xorl %edi,%esi + 
vmovdqa %xmm3,%xmm4 + vpaddd %xmm7,%xmm3,%xmm3 + shldl $5,%ecx,%ecx + addl %esi,%ebx + vpxor %xmm2,%xmm0,%xmm0 + xorl %edx,%ebp + xorl %edi,%edx + addl %ecx,%ebx + addl 4(%esp),%eax + vpsrld $30,%xmm0,%xmm2 + vmovdqa %xmm3,48(%esp) + andl %edx,%ebp + xorl %edi,%edx + shrdl $7,%ecx,%ecx + movl %ebx,%esi + vpslld $2,%xmm0,%xmm0 + xorl %edx,%ebp + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %ecx,%esi + xorl %edx,%ecx + addl %ebx,%eax + addl 8(%esp),%edi + andl %ecx,%esi + vpor %xmm2,%xmm0,%xmm0 + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + vmovdqa 80(%esp),%xmm2 + movl %eax,%ebp + xorl %ecx,%esi + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ebx,%ebp + xorl %ecx,%ebx + addl %eax,%edi + addl 12(%esp),%edx + andl %ebx,%ebp + xorl %ecx,%ebx + shrdl $7,%eax,%eax + movl %edi,%esi + xorl %ebx,%ebp + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %eax,%esi + xorl %ebx,%eax + addl %edi,%edx + vpalignr $8,%xmm7,%xmm0,%xmm3 + vpxor %xmm5,%xmm1,%xmm1 + addl 16(%esp),%ecx + andl %eax,%esi + xorl %ebx,%eax + shrdl $7,%edi,%edi + vpxor %xmm2,%xmm1,%xmm1 + vmovdqa %xmm5,80(%esp) + movl %edx,%ebp + xorl %eax,%esi + vmovdqa %xmm4,%xmm5 + vpaddd %xmm0,%xmm4,%xmm4 + shldl $5,%edx,%edx + addl %esi,%ecx + vpxor %xmm3,%xmm1,%xmm1 + xorl %edi,%ebp + xorl %eax,%edi + addl %edx,%ecx + addl 20(%esp),%ebx + vpsrld $30,%xmm1,%xmm3 + vmovdqa %xmm4,(%esp) + andl %edi,%ebp + xorl %eax,%edi + shrdl $7,%edx,%edx + movl %ecx,%esi + vpslld $2,%xmm1,%xmm1 + xorl %edi,%ebp + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edx,%esi + xorl %edi,%edx + addl %ecx,%ebx + addl 24(%esp),%eax + andl %edx,%esi + vpor %xmm3,%xmm1,%xmm1 + xorl %edi,%edx + shrdl $7,%ecx,%ecx + vmovdqa 96(%esp),%xmm3 + movl %ebx,%ebp + xorl %edx,%esi + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %ecx,%ebp + xorl %edx,%ecx + addl %ebx,%eax + addl 28(%esp),%edi + andl %ecx,%ebp + xorl %edx,%ecx + shrdl $7,%ebx,%ebx + movl %eax,%esi + xorl %ecx,%ebp + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ebx,%esi + xorl %ecx,%ebx + addl %eax,%edi + vpalignr $8,%xmm0,%xmm1,%xmm4 + vpxor %xmm6,%xmm2,%xmm2 + addl 32(%esp),%edx + andl %ebx,%esi + xorl %ecx,%ebx + shrdl $7,%eax,%eax + vpxor %xmm3,%xmm2,%xmm2 + vmovdqa %xmm6,96(%esp) + movl %edi,%ebp + xorl %ebx,%esi + vmovdqa %xmm5,%xmm6 + vpaddd %xmm1,%xmm5,%xmm5 + shldl $5,%edi,%edi + addl %esi,%edx + vpxor %xmm4,%xmm2,%xmm2 + xorl %eax,%ebp + xorl %ebx,%eax + addl %edi,%edx + addl 36(%esp),%ecx + vpsrld $30,%xmm2,%xmm4 + vmovdqa %xmm5,16(%esp) + andl %eax,%ebp + xorl %ebx,%eax + shrdl $7,%edi,%edi + movl %edx,%esi + vpslld $2,%xmm2,%xmm2 + xorl %eax,%ebp + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %edi,%esi + xorl %eax,%edi + addl %edx,%ecx + addl 40(%esp),%ebx + andl %edi,%esi + vpor %xmm4,%xmm2,%xmm2 + xorl %eax,%edi + shrdl $7,%edx,%edx + vmovdqa 64(%esp),%xmm4 + movl %ecx,%ebp + xorl %edi,%esi + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edx,%ebp + xorl %edi,%edx + addl %ecx,%ebx + addl 44(%esp),%eax + andl %edx,%ebp + xorl %edi,%edx + shrdl $7,%ecx,%ecx + movl %ebx,%esi + xorl %edx,%ebp + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + addl %ebx,%eax + vpalignr $8,%xmm1,%xmm2,%xmm5 + vpxor %xmm7,%xmm3,%xmm3 + addl 48(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + vpxor %xmm4,%xmm3,%xmm3 + vmovdqa %xmm7,64(%esp) + addl %esi,%edi + xorl %ecx,%ebp + vmovdqa %xmm6,%xmm7 + vpaddd %xmm2,%xmm6,%xmm6 + shrdl $7,%ebx,%ebx + addl %eax,%edi + vpxor %xmm5,%xmm3,%xmm3 + addl 52(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + vpsrld $30,%xmm3,%xmm5 + vmovdqa %xmm6,32(%esp) + addl %ebp,%edx + xorl 
%ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + vpslld $2,%xmm3,%xmm3 + addl 56(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + vpor %xmm5,%xmm3,%xmm3 + addl 60(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl (%esp),%eax + vpaddd %xmm3,%xmm7,%xmm7 + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + vmovdqa %xmm7,48(%esp) + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 4(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 8(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + addl %esi,%edx + xorl %ebx,%ebp + shrdl $7,%eax,%eax + addl %edi,%edx + addl 12(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + movl 196(%esp),%ebp + cmpl 200(%esp),%ebp + je L010done + vmovdqa 160(%esp),%xmm7 + vmovdqa 176(%esp),%xmm6 + vmovdqu (%ebp),%xmm0 + vmovdqu 16(%ebp),%xmm1 + vmovdqu 32(%ebp),%xmm2 + vmovdqu 48(%ebp),%xmm3 + addl $64,%ebp + vpshufb %xmm6,%xmm0,%xmm0 + movl %ebp,196(%esp) + vmovdqa %xmm7,96(%esp) + addl 16(%esp),%ebx + xorl %edi,%esi + vpshufb %xmm6,%xmm1,%xmm1 + movl %ecx,%ebp + shldl $5,%ecx,%ecx + vpaddd %xmm7,%xmm0,%xmm4 + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + vmovdqa %xmm4,(%esp) + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ecx,%ebp + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + vpshufb %xmm6,%xmm2,%xmm2 + movl %edx,%ebp + shldl $5,%edx,%edx + vpaddd %xmm7,%xmm1,%xmm5 + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + vmovdqa %xmm5,16(%esp) + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + vpshufb %xmm6,%xmm3,%xmm3 + movl %edi,%ebp + shldl $5,%edi,%edi + vpaddd %xmm7,%xmm2,%xmm6 + addl %esi,%edx + xorl %ebx,%ebp + shrdl $7,%eax,%eax + addl %edi,%edx + vmovdqa %xmm6,32(%esp) + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + shrdl $7,%ecx,%ecx + addl %ebx,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,%ebx + movl %ecx,8(%ebp) + xorl %edx,%ebx + movl 
%edx,12(%ebp) + movl %edi,16(%ebp) + movl %esi,%ebp + andl %ebx,%esi + movl %ebp,%ebx + jmp L009loop +.align 4,0x90 +L010done: + addl 16(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 20(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + xorl %edx,%esi + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 24(%esp),%edi + xorl %ecx,%esi + movl %eax,%ebp + shldl $5,%eax,%eax + addl %esi,%edi + xorl %ecx,%ebp + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 28(%esp),%edx + xorl %ebx,%ebp + movl %edi,%esi + shldl $5,%edi,%edi + addl %ebp,%edx + xorl %ebx,%esi + shrdl $7,%eax,%eax + addl %edi,%edx + addl 32(%esp),%ecx + xorl %eax,%esi + movl %edx,%ebp + shldl $5,%edx,%edx + addl %esi,%ecx + xorl %eax,%ebp + shrdl $7,%edi,%edi + addl %edx,%ecx + addl 36(%esp),%ebx + xorl %edi,%ebp + movl %ecx,%esi + shldl $5,%ecx,%ecx + addl %ebp,%ebx + xorl %edi,%esi + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 40(%esp),%eax + xorl %edx,%esi + movl %ebx,%ebp + shldl $5,%ebx,%ebx + addl %esi,%eax + xorl %edx,%ebp + shrdl $7,%ecx,%ecx + addl %ebx,%eax + addl 44(%esp),%edi + xorl %ecx,%ebp + movl %eax,%esi + shldl $5,%eax,%eax + addl %ebp,%edi + xorl %ecx,%esi + shrdl $7,%ebx,%ebx + addl %eax,%edi + addl 48(%esp),%edx + xorl %ebx,%esi + movl %edi,%ebp + shldl $5,%edi,%edi + addl %esi,%edx + xorl %ebx,%ebp + shrdl $7,%eax,%eax + addl %edi,%edx + addl 52(%esp),%ecx + xorl %eax,%ebp + movl %edx,%esi + shldl $5,%edx,%edx + addl %ebp,%ecx + xorl %eax,%esi + shrdl $7,%edi,%edi + addl %edx,%ecx + addl 56(%esp),%ebx + xorl %edi,%esi + movl %ecx,%ebp + shldl $5,%ecx,%ecx + addl %esi,%ebx + xorl %edi,%ebp + shrdl $7,%edx,%edx + addl %ecx,%ebx + addl 60(%esp),%eax + xorl %edx,%ebp + movl %ebx,%esi + shldl $5,%ebx,%ebx + addl %ebp,%eax + shrdl $7,%ecx,%ecx + addl %ebx,%eax + vzeroall + movl 192(%esp),%ebp + addl (%ebp),%eax + movl 204(%esp),%esp + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %edx,12(%ebp) + movl %edi,16(%ebp) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 6,0x90 +LK_XX_XX: +.long 1518500249,1518500249,1518500249,1518500249 +.long 1859775393,1859775393,1859775393,1859775393 +.long 2400959708,2400959708,2400959708,2400959708 +.long 3395469782,3395469782,3395469782,3395469782 +.long 66051,67438087,134810123,202182159 +.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 +.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 +.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 +.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 +.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 +.comm _OPENSSL_ia32cap_P,16 diff --git a/deps/openssl/config/archs/BSD-x86/no-asm/configdata.pm b/deps/openssl/config/archs/BSD-x86/no-asm/configdata.pm index c8fd824b7fa837..5ca2779c7e3d61 100644 --- a/deps/openssl/config/archs/BSD-x86/no-asm/configdata.pm +++ b/deps/openssl/config/archs/BSD-x86/no-asm/configdata.pm @@ -61,7 +61,7 @@ our %config = ( options => "enable-ssl-trace no-afalgeng no-asan no-asm no-buildtest-c++ no-comp no-crypto-mdebug no-crypto-mdebug-backtrace no-dynamic-engine no-ec_nistp_64_gcc_128 no-egd no-external-tests no-fuzz-afl no-fuzz-libfuzzer no-heartbeats no-md2 no-msan no-rc5 no-sctp no-shared no-ssl3 no-ssl3-method no-ubsan no-unit-test no-weak-ssl-ciphers no-zlib no-zlib-dynamic", perl_archname => "x86_64-linux-gnu-thread-multi", perl_cmd => 
"/usr/bin/perl", - perl_version => "5.28.1", + perl_version => "5.26.1", perlargv => [ "no-comp", "no-shared", "no-afalgeng", "enable-ssl-trace", "no-asm", "BSD-x86" ], perlenv => { "AR" => undef, @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "BSD-x86", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -15305,3 +15305,4 @@ Verbose output. =back =cut + diff --git a/deps/openssl/config/archs/BSD-x86/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/BSD-x86/no-asm/crypto/buildinf.h index 4d7a6c77c175cd..da7a46d286147f 100644 --- a/deps/openssl/config/archs/BSD-x86/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/BSD-x86/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: BSD-x86" -#define DATE "built on: Wed Mar 18 21:04:56 2020 UTC" +#define DATE "built on: Tue Mar 31 13:06:30 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/configdata.pm b/deps/openssl/config/archs/BSD-x86_64/asm/configdata.pm index 714d07a5bbde64..f800e14a842a8d 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/configdata.pm +++ b/deps/openssl/config/archs/BSD-x86_64/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "BSD-x86_64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s index 978bd2b6239c15..a38e21f0484e24 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha1_enc,@function .align 32 aesni_cbc_sha1_enc: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+0(%rip),%r10d movq OPENSSL_ia32cap_P+4(%rip),%r11 @@ -18,7 +18,7 @@ aesni_cbc_sha1_enc: je aesni_cbc_sha1_enc_avx jmp aesni_cbc_sha1_enc_ssse3 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc .type aesni_cbc_sha1_enc_ssse3,@function .align 32 @@ -2732,7 +2732,7 @@ K_XX_XX: .type aesni_cbc_sha1_enc_shaext,@function .align 32 aesni_cbc_sha1_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 movdqu (%r9),%xmm8 movd 16(%r9),%xmm9 @@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext: movdqu %xmm8,(%r9) movd %xmm9,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s index dd09f1b290af62..3e56a82578a354 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha256_enc,@function .align 16 aesni_cbc_sha256_enc: -.cfi_startproc +.cfi_startproc leaq OPENSSL_ia32cap_P(%rip),%r11 movl $1,%eax cmpq $0,%rdi @@ -31,7 +31,7 @@ aesni_cbc_sha256_enc: ud2 .Lprobe: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc .align 64 @@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2: .type aesni_cbc_sha256_enc_shaext,@function .align 32 
aesni_cbc_sha256_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 leaq K256+128(%rip),%rax movdqu (%r9),%xmm1 @@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext: movdqu %xmm1,(%r9) movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-x86_64.s index c1e791eff59235..1a4b22e7b8a5d9 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/aes/aesni-x86_64.s @@ -861,7 +861,7 @@ aesni_ecb_encrypt: .type aesni_ccm64_encrypt_blocks,@function .align 16 aesni_ccm64_encrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movdqu (%r8),%xmm6 movdqa .Lincrement64(%rip),%xmm9 @@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks .globl aesni_ccm64_decrypt_blocks .type aesni_ccm64_decrypt_blocks,@function .align 16 aesni_ccm64_decrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movups (%r8),%xmm6 movdqu (%r9),%xmm3 @@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks .globl aesni_ctr32_encrypt_blocks .type aesni_ctr32_encrypt_blocks,@function @@ -2794,7 +2794,7 @@ aesni_ocb_encrypt: .type __ocb_encrypt6,@function .align 32 __ocb_encrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2892,13 +2892,13 @@ __ocb_encrypt6: .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt6,.-__ocb_encrypt6 .type __ocb_encrypt4,@function .align 32 __ocb_encrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2963,13 +2963,13 @@ __ocb_encrypt4: .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt4,.-__ocb_encrypt4 .type __ocb_encrypt1,@function .align 32 __ocb_encrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm2,%xmm8 @@ -3000,7 +3000,7 @@ __ocb_encrypt1: .byte 102,15,56,221,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt1,.-__ocb_encrypt1 .globl aesni_ocb_decrypt @@ -3243,7 +3243,7 @@ aesni_ocb_decrypt: .type __ocb_decrypt6,@function .align 32 __ocb_decrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3335,13 +3335,13 @@ __ocb_decrypt6: .byte 102,65,15,56,223,246 .byte 102,65,15,56,223,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt6,.-__ocb_decrypt6 .type __ocb_decrypt4,@function .align 32 __ocb_decrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3402,13 +3402,13 @@ __ocb_decrypt4: .byte 102,65,15,56,223,228 .byte 102,65,15,56,223,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt4,.-__ocb_decrypt4 .type __ocb_decrypt1,@function .align 32 __ocb_decrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm7,%xmm2 @@ -3438,7 +3438,7 @@ __ocb_decrypt1: .byte 102,15,56,223,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt1,.-__ocb_decrypt1 
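[Editorial aside, not part of the diff: the configdata.pm hunks above record the 1.1.1e -> 1.1.1f bump twice, as a version string and as a packed version_num. For OpenSSL 1.1.x the OPENSSL_VERSION_NUMBER layout is 0xMNNFFPPS (major nibble, minor byte, fix byte, patch byte, status nibble), so 0x1010106fL decodes to 1.1.1 patch 6 ('f'), status 0xf = release. A minimal C sketch of that decoding, assuming only the 1.1.x layout:

#include <stdio.h>

int main(void) {
    unsigned long v = 0x1010106fUL;      /* version_num from the hunks above */
    unsigned major  = (v >> 28) & 0xf;   /* 1 */
    unsigned minor  = (v >> 20) & 0xff;  /* 1 */
    unsigned fix    = (v >> 12) & 0xff;  /* 1 */
    unsigned patch  = (v >> 4)  & 0xff;  /* 6 -> 'f' (a = 1, b = 2, ...) */
    unsigned status = v & 0xf;           /* 0xf means a release build */
    printf("%u.%u.%u%c (status 0x%x)\n", major, minor, fix,
           patch ? (char)('a' + patch - 1) : ' ', status);
    return 0;                            /* prints: 1.1.1f (status 0xf) */
}

This matches the string form "1.1.1f" in the same hunks; the patch byte is what changed (0x05 -> 0x06), which is why only one hex digit differs between 0x1010105fL and 0x1010106fL.]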
.globl aesni_cbc_encrypt .type aesni_cbc_encrypt,@function @@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_set_encrypt_key,.-aesni_set_encrypt_key .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key .align 64 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bf/bf-586.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bf/bf-586.s index 7d2d172be109ea..73c5310822042a 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bf/bf-586.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bf/bf-586.s @@ -10,7 +10,7 @@ L_BF_encrypt_begin: movl 16(%esp),%ebp pushl %esi pushl %edi - # Load the 2 words + # Load the 2 words movl (%ebx),%edi movl 4(%ebx),%esi xorl %eax,%eax @@ -18,7 +18,7 @@ L_BF_encrypt_begin: xorl %ecx,%ecx xorl %ebx,%edi - # Round 0 + # Round 0 movl 4(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -38,7 +38,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 1 + # Round 1 movl 8(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -58,7 +58,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 2 + # Round 2 movl 12(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -78,7 +78,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 3 + # Round 3 movl 16(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -98,7 +98,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 4 + # Round 4 movl 20(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -118,7 +118,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 5 + # Round 5 movl 24(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -138,7 +138,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 6 + # Round 6 movl 28(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -158,7 +158,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 7 + # Round 7 movl 32(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -178,7 +178,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 8 + # Round 8 movl 36(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -198,7 +198,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 9 + # Round 9 movl 40(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -218,7 +218,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 10 + # Round 10 movl 44(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -238,7 +238,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 11 + # Round 11 movl 48(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -258,7 +258,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 12 + # Round 12 movl 52(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -278,7 +278,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 13 + # Round 13 movl 56(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -298,7 +298,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 14 + # Round 14 movl 60(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -318,7 +318,7 @@ L_BF_encrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 15 + # Round 15 movl 64(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -335,7 +335,7 @@ L_BF_encrypt_begin: xorl %eax,%ebx movl 3144(%ebp,%edx,4),%edx addl %edx,%ebx - # Load parameter 0 (16) enc=1 + # Load parameter 0 (16) enc=1 movl 20(%esp),%eax xorl %ebx,%edi movl 68(%ebp),%edx @@ -358,7 +358,7 @@ L_BF_decrypt_begin: movl 16(%esp),%ebp pushl %esi pushl %edi - # Load the 2 words + # Load the 2 words movl (%ebx),%edi movl 4(%ebx),%esi xorl %eax,%eax @@ -366,7 +366,7 @@ L_BF_decrypt_begin: xorl %ecx,%ecx xorl %ebx,%edi - # Round 16 + # 
Round 16 movl 64(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -386,7 +386,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 15 + # Round 15 movl 60(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -406,7 +406,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 14 + # Round 14 movl 56(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -426,7 +426,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 13 + # Round 13 movl 52(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -446,7 +446,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 12 + # Round 12 movl 48(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -466,7 +466,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 11 + # Round 11 movl 44(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -486,7 +486,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 10 + # Round 10 movl 40(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -506,7 +506,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 9 + # Round 9 movl 36(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -526,7 +526,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 8 + # Round 8 movl 32(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -546,7 +546,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 7 + # Round 7 movl 28(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -566,7 +566,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 6 + # Round 6 movl 24(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -586,7 +586,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 5 + # Round 5 movl 20(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -606,7 +606,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 4 + # Round 4 movl 16(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -626,7 +626,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 3 + # Round 3 movl 12(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -646,7 +646,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%edi - # Round 2 + # Round 2 movl 8(%ebp),%edx movl %edi,%ebx xorl %edx,%esi @@ -666,7 +666,7 @@ L_BF_decrypt_begin: xorl %eax,%eax xorl %ebx,%esi - # Round 1 + # Round 1 movl 4(%ebp),%edx movl %esi,%ebx xorl %edx,%edi @@ -683,7 +683,7 @@ L_BF_decrypt_begin: xorl %eax,%ebx movl 3144(%ebp,%edx,4),%edx addl %edx,%ebx - # Load parameter 0 (1) enc=0 + # Load parameter 0 (1) enc=0 movl 20(%esp),%eax xorl %ebx,%edi movl (%ebp),%edx @@ -705,7 +705,7 @@ L_BF_cbc_encrypt_begin: pushl %esi pushl %edi movl 28(%esp),%ebp - # getting iv ptr from parameter 4 + # getting iv ptr from parameter 4 movl 36(%esp),%ebx movl (%ebx),%esi movl 4(%ebx),%edi @@ -716,9 +716,9 @@ L_BF_cbc_encrypt_begin: movl %esp,%ebx movl 36(%esp),%esi movl 40(%esp),%edi - # getting encrypt flag from parameter 5 + # getting encrypt flag from parameter 5 movl 56(%esp),%ecx - # get and push parameter 3 + # get and push parameter 3 movl 48(%esp),%eax pushl %eax pushl %ebx diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/bn-586.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/bn-586.s index 09c65d1d3e8668..c85836ac47ee45 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/bn-586.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/bn-586.s @@ -114,7 +114,7 @@ L001maw_non_sse2: jz L006maw_finish .align 4,0x90 L007maw_loop: - # Round 0 + # Round 0 movl (%ebx),%eax mull %ebp addl %esi,%eax @@ -123,7 +123,7 @@ L007maw_loop: adcl $0,%edx movl %eax,(%edi) movl %edx,%esi - # Round 4 + # Round 4 movl 4(%ebx),%eax mull %ebp addl %esi,%eax @@ -132,7 
+132,7 @@ L007maw_loop: adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi - # Round 8 + # Round 8 movl 8(%ebx),%eax mull %ebp addl %esi,%eax @@ -141,7 +141,7 @@ L007maw_loop: adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi - # Round 12 + # Round 12 movl 12(%ebx),%eax mull %ebp addl %esi,%eax @@ -150,7 +150,7 @@ L007maw_loop: adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi - # Round 16 + # Round 16 movl 16(%ebx),%eax mull %ebp addl %esi,%eax @@ -159,7 +159,7 @@ L007maw_loop: adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi - # Round 20 + # Round 20 movl 20(%ebx),%eax mull %ebp addl %esi,%eax @@ -168,7 +168,7 @@ L007maw_loop: adcl $0,%edx movl %eax,20(%edi) movl %edx,%esi - # Round 24 + # Round 24 movl 24(%ebx),%eax mull %ebp addl %esi,%eax @@ -177,7 +177,7 @@ L007maw_loop: adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi - # Round 28 + # Round 28 movl 28(%ebx),%eax mull %ebp addl %esi,%eax @@ -197,7 +197,7 @@ L006maw_finish: jnz L008maw_finish2 jmp L009maw_end L008maw_finish2: - # Tail Round 0 + # Tail Round 0 movl (%ebx),%eax mull %ebp addl %esi,%eax @@ -208,7 +208,7 @@ L008maw_finish2: movl %eax,(%edi) movl %edx,%esi jz L009maw_end - # Tail Round 1 + # Tail Round 1 movl 4(%ebx),%eax mull %ebp addl %esi,%eax @@ -219,7 +219,7 @@ L008maw_finish2: movl %eax,4(%edi) movl %edx,%esi jz L009maw_end - # Tail Round 2 + # Tail Round 2 movl 8(%ebx),%eax mull %ebp addl %esi,%eax @@ -230,7 +230,7 @@ L008maw_finish2: movl %eax,8(%edi) movl %edx,%esi jz L009maw_end - # Tail Round 3 + # Tail Round 3 movl 12(%ebx),%eax mull %ebp addl %esi,%eax @@ -241,7 +241,7 @@ L008maw_finish2: movl %eax,12(%edi) movl %edx,%esi jz L009maw_end - # Tail Round 4 + # Tail Round 4 movl 16(%ebx),%eax mull %ebp addl %esi,%eax @@ -252,7 +252,7 @@ L008maw_finish2: movl %eax,16(%edi) movl %edx,%esi jz L009maw_end - # Tail Round 5 + # Tail Round 5 movl 20(%ebx),%eax mull %ebp addl %esi,%eax @@ -263,7 +263,7 @@ L008maw_finish2: movl %eax,20(%edi) movl %edx,%esi jz L009maw_end - # Tail Round 6 + # Tail Round 6 movl 24(%ebx),%eax mull %ebp addl %esi,%eax @@ -324,56 +324,56 @@ L011mw_non_sse2: andl $4294967288,%ebp jz L013mw_finish L014mw_loop: - # Round 0 + # Round 0 movl (%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,(%edi) movl %edx,%esi - # Round 4 + # Round 4 movl 4(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi - # Round 8 + # Round 8 movl 8(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi - # Round 12 + # Round 12 movl 12(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi - # Round 16 + # Round 16 movl 16(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi - # Round 20 + # Round 20 movl 20(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,20(%edi) movl %edx,%esi - # Round 24 + # Round 24 movl 24(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi - # Round 28 + # Round 28 movl 28(%ebx),%eax mull %ecx addl %esi,%eax @@ -392,7 +392,7 @@ L013mw_finish: jnz L015mw_finish2 jmp L016mw_end L015mw_finish2: - # Tail Round 0 + # Tail Round 0 movl (%ebx),%eax mull %ecx addl %esi,%eax @@ -401,7 +401,7 @@ L015mw_finish2: movl %edx,%esi decl %ebp jz L016mw_end - # Tail Round 1 + # Tail Round 1 movl 4(%ebx),%eax mull %ecx addl %esi,%eax @@ -410,7 +410,7 @@ L015mw_finish2: movl %edx,%esi decl %ebp jz L016mw_end - # Tail Round 2 + # Tail Round 2 movl 8(%ebx),%eax mull %ecx addl %esi,%eax @@ -419,7 +419,7 @@ L015mw_finish2: movl %edx,%esi decl %ebp jz L016mw_end - # Tail Round 
3 + # Tail Round 3 movl 12(%ebx),%eax mull %ecx addl %esi,%eax @@ -428,7 +428,7 @@ L015mw_finish2: movl %edx,%esi decl %ebp jz L016mw_end - # Tail Round 4 + # Tail Round 4 movl 16(%ebx),%eax mull %ecx addl %esi,%eax @@ -437,7 +437,7 @@ L015mw_finish2: movl %edx,%esi decl %ebp jz L016mw_end - # Tail Round 5 + # Tail Round 5 movl 20(%ebx),%eax mull %ecx addl %esi,%eax @@ -446,7 +446,7 @@ L015mw_finish2: movl %edx,%esi decl %ebp jz L016mw_end - # Tail Round 6 + # Tail Round 6 movl 24(%ebx),%eax mull %ecx addl %esi,%eax @@ -497,42 +497,42 @@ L018sqr_non_sse2: andl $4294967288,%ebx jz L020sw_finish L021sw_loop: - # Round 0 + # Round 0 movl (%edi),%eax mull %eax movl %eax,(%esi) movl %edx,4(%esi) - # Round 4 + # Round 4 movl 4(%edi),%eax mull %eax movl %eax,8(%esi) movl %edx,12(%esi) - # Round 8 + # Round 8 movl 8(%edi),%eax mull %eax movl %eax,16(%esi) movl %edx,20(%esi) - # Round 12 + # Round 12 movl 12(%edi),%eax mull %eax movl %eax,24(%esi) movl %edx,28(%esi) - # Round 16 + # Round 16 movl 16(%edi),%eax mull %eax movl %eax,32(%esi) movl %edx,36(%esi) - # Round 20 + # Round 20 movl 20(%edi),%eax mull %eax movl %eax,40(%esi) movl %edx,44(%esi) - # Round 24 + # Round 24 movl 24(%edi),%eax mull %eax movl %eax,48(%esi) movl %edx,52(%esi) - # Round 28 + # Round 28 movl 28(%edi),%eax mull %eax movl %eax,56(%esi) @@ -546,49 +546,49 @@ L020sw_finish: movl 28(%esp),%ebx andl $7,%ebx jz L022sw_end - # Tail Round 0 + # Tail Round 0 movl (%edi),%eax mull %eax movl %eax,(%esi) decl %ebx movl %edx,4(%esi) jz L022sw_end - # Tail Round 1 + # Tail Round 1 movl 4(%edi),%eax mull %eax movl %eax,8(%esi) decl %ebx movl %edx,12(%esi) jz L022sw_end - # Tail Round 2 + # Tail Round 2 movl 8(%edi),%eax mull %eax movl %eax,16(%esi) decl %ebx movl %edx,20(%esi) jz L022sw_end - # Tail Round 3 + # Tail Round 3 movl 12(%edi),%eax mull %eax movl %eax,24(%esi) decl %ebx movl %edx,28(%esi) jz L022sw_end - # Tail Round 4 + # Tail Round 4 movl 16(%edi),%eax mull %eax movl %eax,32(%esi) decl %ebx movl %edx,36(%esi) jz L022sw_end - # Tail Round 5 + # Tail Round 5 movl 20(%edi),%eax mull %eax movl %eax,40(%esi) decl %ebx movl %edx,44(%esi) jz L022sw_end - # Tail Round 6 + # Tail Round 6 movl 24(%edi),%eax mull %eax movl %eax,48(%esi) @@ -625,7 +625,7 @@ L_bn_add_words_begin: andl $4294967288,%ebp jz L023aw_finish L024aw_loop: - # Round 0 + # Round 0 movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx @@ -634,7 +634,7 @@ L024aw_loop: addl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) - # Round 1 + # Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx @@ -643,7 +643,7 @@ L024aw_loop: addl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) - # Round 2 + # Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx @@ -652,7 +652,7 @@ L024aw_loop: addl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) - # Round 3 + # Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx @@ -661,7 +661,7 @@ L024aw_loop: addl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) - # Round 4 + # Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx @@ -670,7 +670,7 @@ L024aw_loop: addl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) - # Round 5 + # Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx @@ -679,7 +679,7 @@ L024aw_loop: addl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) - # Round 6 + # Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx @@ -688,7 +688,7 @@ L024aw_loop: addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) - # Round 7 + # Round 7 movl 28(%esi),%ecx movl 28(%edi),%edx addl %eax,%ecx @@ -707,7 +707,7 @@ L023aw_finish: movl 32(%esp),%ebp andl 
$7,%ebp jz L025aw_end - # Tail Round 0 + # Tail Round 0 movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx @@ -718,7 +718,7 @@ L023aw_finish: decl %ebp movl %ecx,(%ebx) jz L025aw_end - # Tail Round 1 + # Tail Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx @@ -729,7 +729,7 @@ L023aw_finish: decl %ebp movl %ecx,4(%ebx) jz L025aw_end - # Tail Round 2 + # Tail Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx @@ -740,7 +740,7 @@ L023aw_finish: decl %ebp movl %ecx,8(%ebx) jz L025aw_end - # Tail Round 3 + # Tail Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx @@ -751,7 +751,7 @@ L023aw_finish: decl %ebp movl %ecx,12(%ebx) jz L025aw_end - # Tail Round 4 + # Tail Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx @@ -762,7 +762,7 @@ L023aw_finish: decl %ebp movl %ecx,16(%ebx) jz L025aw_end - # Tail Round 5 + # Tail Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx @@ -773,7 +773,7 @@ L023aw_finish: decl %ebp movl %ecx,20(%ebx) jz L025aw_end - # Tail Round 6 + # Tail Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx @@ -805,7 +805,7 @@ L_bn_sub_words_begin: andl $4294967288,%ebp jz L026aw_finish L027aw_loop: - # Round 0 + # Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -814,7 +814,7 @@ L027aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) - # Round 1 + # Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx @@ -823,7 +823,7 @@ L027aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) - # Round 2 + # Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx @@ -832,7 +832,7 @@ L027aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) - # Round 3 + # Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx @@ -841,7 +841,7 @@ L027aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) - # Round 4 + # Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx @@ -850,7 +850,7 @@ L027aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) - # Round 5 + # Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx @@ -859,7 +859,7 @@ L027aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) - # Round 6 + # Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx @@ -868,7 +868,7 @@ L027aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) - # Round 7 + # Round 7 movl 28(%esi),%ecx movl 28(%edi),%edx subl %eax,%ecx @@ -887,7 +887,7 @@ L026aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz L028aw_end - # Tail Round 0 + # Tail Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -898,7 +898,7 @@ L026aw_finish: decl %ebp movl %ecx,(%ebx) jz L028aw_end - # Tail Round 1 + # Tail Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx @@ -909,7 +909,7 @@ L026aw_finish: decl %ebp movl %ecx,4(%ebx) jz L028aw_end - # Tail Round 2 + # Tail Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx @@ -920,7 +920,7 @@ L026aw_finish: decl %ebp movl %ecx,8(%ebx) jz L028aw_end - # Tail Round 3 + # Tail Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx @@ -931,7 +931,7 @@ L026aw_finish: decl %ebp movl %ecx,12(%ebx) jz L028aw_end - # Tail Round 4 + # Tail Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx @@ -942,7 +942,7 @@ L026aw_finish: decl %ebp movl %ecx,16(%ebx) jz L028aw_end - # Tail Round 5 + # Tail Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx @@ -953,7 +953,7 @@ L026aw_finish: decl %ebp movl %ecx,20(%ebx) jz L028aw_end - # Tail Round 6 + # Tail Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx @@ -985,7 +985,7 @@ L_bn_sub_part_words_begin: andl 
$4294967288,%ebp jz L029aw_finish L030aw_loop: - # Round 0 + # Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -994,7 +994,7 @@ L030aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) - # Round 1 + # Round 1 movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx @@ -1003,7 +1003,7 @@ L030aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) - # Round 2 + # Round 2 movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx @@ -1012,7 +1012,7 @@ L030aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) - # Round 3 + # Round 3 movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx @@ -1021,7 +1021,7 @@ L030aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) - # Round 4 + # Round 4 movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx @@ -1030,7 +1030,7 @@ L030aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) - # Round 5 + # Round 5 movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx @@ -1039,7 +1039,7 @@ L030aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) - # Round 6 + # Round 6 movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx @@ -1048,7 +1048,7 @@ L030aw_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) - # Round 7 + # Round 7 movl 28(%esi),%ecx movl 28(%edi),%edx subl %eax,%ecx @@ -1067,7 +1067,7 @@ L029aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz L031aw_end - # Tail Round 0 + # Tail Round 0 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -1081,7 +1081,7 @@ L029aw_finish: addl $4,%ebx decl %ebp jz L031aw_end - # Tail Round 1 + # Tail Round 1 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -1095,7 +1095,7 @@ L029aw_finish: addl $4,%ebx decl %ebp jz L031aw_end - # Tail Round 2 + # Tail Round 2 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -1109,7 +1109,7 @@ L029aw_finish: addl $4,%ebx decl %ebp jz L031aw_end - # Tail Round 3 + # Tail Round 3 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -1123,7 +1123,7 @@ L029aw_finish: addl $4,%ebx decl %ebp jz L031aw_end - # Tail Round 4 + # Tail Round 4 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -1137,7 +1137,7 @@ L029aw_finish: addl $4,%ebx decl %ebp jz L031aw_end - # Tail Round 5 + # Tail Round 5 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -1151,7 +1151,7 @@ L029aw_finish: addl $4,%ebx decl %ebp jz L031aw_end - # Tail Round 6 + # Tail Round 6 movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx @@ -1170,14 +1170,14 @@ L031aw_end: cmpl $0,%ebp je L032pw_end jge L033pw_pos - # pw_neg + # pw_neg movl $0,%edx subl %ebp,%edx movl %edx,%ebp andl $4294967288,%ebp jz L034pw_neg_finish L035pw_neg_loop: - # dl<0 Round 0 + # dl<0 Round 0 movl $0,%ecx movl (%edi),%edx subl %eax,%ecx @@ -1186,7 +1186,7 @@ L035pw_neg_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) - # dl<0 Round 1 + # dl<0 Round 1 movl $0,%ecx movl 4(%edi),%edx subl %eax,%ecx @@ -1195,7 +1195,7 @@ L035pw_neg_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) - # dl<0 Round 2 + # dl<0 Round 2 movl $0,%ecx movl 8(%edi),%edx subl %eax,%ecx @@ -1204,7 +1204,7 @@ L035pw_neg_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) - # dl<0 Round 3 + # dl<0 Round 3 movl $0,%ecx movl 12(%edi),%edx subl %eax,%ecx @@ -1213,7 +1213,7 @@ L035pw_neg_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) - # dl<0 Round 4 + # dl<0 Round 4 movl $0,%ecx movl 16(%edi),%edx subl %eax,%ecx @@ -1222,7 +1222,7 @@ L035pw_neg_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) - # dl<0 Round 5 + # dl<0 Round 5 movl $0,%ecx movl 20(%edi),%edx subl %eax,%ecx @@ -1231,7 +1231,7 @@ L035pw_neg_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) - # dl<0 Round 6 + # dl<0 
Round 6 movl $0,%ecx movl 24(%edi),%edx subl %eax,%ecx @@ -1240,7 +1240,7 @@ L035pw_neg_loop: subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) - # dl<0 Round 7 + # dl<0 Round 7 movl $0,%ecx movl 28(%edi),%edx subl %eax,%ecx @@ -1260,7 +1260,7 @@ L034pw_neg_finish: subl %edx,%ebp andl $7,%ebp jz L032pw_end - # dl<0 Tail Round 0 + # dl<0 Tail Round 0 movl $0,%ecx movl (%edi),%edx subl %eax,%ecx @@ -1271,7 +1271,7 @@ L034pw_neg_finish: decl %ebp movl %ecx,(%ebx) jz L032pw_end - # dl<0 Tail Round 1 + # dl<0 Tail Round 1 movl $0,%ecx movl 4(%edi),%edx subl %eax,%ecx @@ -1282,7 +1282,7 @@ L034pw_neg_finish: decl %ebp movl %ecx,4(%ebx) jz L032pw_end - # dl<0 Tail Round 2 + # dl<0 Tail Round 2 movl $0,%ecx movl 8(%edi),%edx subl %eax,%ecx @@ -1293,7 +1293,7 @@ L034pw_neg_finish: decl %ebp movl %ecx,8(%ebx) jz L032pw_end - # dl<0 Tail Round 3 + # dl<0 Tail Round 3 movl $0,%ecx movl 12(%edi),%edx subl %eax,%ecx @@ -1304,7 +1304,7 @@ L034pw_neg_finish: decl %ebp movl %ecx,12(%ebx) jz L032pw_end - # dl<0 Tail Round 4 + # dl<0 Tail Round 4 movl $0,%ecx movl 16(%edi),%edx subl %eax,%ecx @@ -1315,7 +1315,7 @@ L034pw_neg_finish: decl %ebp movl %ecx,16(%ebx) jz L032pw_end - # dl<0 Tail Round 5 + # dl<0 Tail Round 5 movl $0,%ecx movl 20(%edi),%edx subl %eax,%ecx @@ -1326,7 +1326,7 @@ L034pw_neg_finish: decl %ebp movl %ecx,20(%ebx) jz L032pw_end - # dl<0 Tail Round 6 + # dl<0 Tail Round 6 movl $0,%ecx movl 24(%edi),%edx subl %eax,%ecx @@ -1340,42 +1340,42 @@ L033pw_pos: andl $4294967288,%ebp jz L036pw_pos_finish L037pw_pos_loop: - # dl>0 Round 0 + # dl>0 Round 0 movl (%esi),%ecx subl %eax,%ecx movl %ecx,(%ebx) jnc L038pw_nc0 - # dl>0 Round 1 + # dl>0 Round 1 movl 4(%esi),%ecx subl %eax,%ecx movl %ecx,4(%ebx) jnc L039pw_nc1 - # dl>0 Round 2 + # dl>0 Round 2 movl 8(%esi),%ecx subl %eax,%ecx movl %ecx,8(%ebx) jnc L040pw_nc2 - # dl>0 Round 3 + # dl>0 Round 3 movl 12(%esi),%ecx subl %eax,%ecx movl %ecx,12(%ebx) jnc L041pw_nc3 - # dl>0 Round 4 + # dl>0 Round 4 movl 16(%esi),%ecx subl %eax,%ecx movl %ecx,16(%ebx) jnc L042pw_nc4 - # dl>0 Round 5 + # dl>0 Round 5 movl 20(%esi),%ecx subl %eax,%ecx movl %ecx,20(%ebx) jnc L043pw_nc5 - # dl>0 Round 6 + # dl>0 Round 6 movl 24(%esi),%ecx subl %eax,%ecx movl %ecx,24(%ebx) jnc L044pw_nc6 - # dl>0 Round 7 + # dl>0 Round 7 movl 28(%esi),%ecx subl %eax,%ecx movl %ecx,28(%ebx) @@ -1389,49 +1389,49 @@ L036pw_pos_finish: movl 36(%esp),%ebp andl $7,%ebp jz L032pw_end - # dl>0 Tail Round 0 + # dl>0 Tail Round 0 movl (%esi),%ecx subl %eax,%ecx movl %ecx,(%ebx) jnc L046pw_tail_nc0 decl %ebp jz L032pw_end - # dl>0 Tail Round 1 + # dl>0 Tail Round 1 movl 4(%esi),%ecx subl %eax,%ecx movl %ecx,4(%ebx) jnc L047pw_tail_nc1 decl %ebp jz L032pw_end - # dl>0 Tail Round 2 + # dl>0 Tail Round 2 movl 8(%esi),%ecx subl %eax,%ecx movl %ecx,8(%ebx) jnc L048pw_tail_nc2 decl %ebp jz L032pw_end - # dl>0 Tail Round 3 + # dl>0 Tail Round 3 movl 12(%esi),%ecx subl %eax,%ecx movl %ecx,12(%ebx) jnc L049pw_tail_nc3 decl %ebp jz L032pw_end - # dl>0 Tail Round 4 + # dl>0 Tail Round 4 movl 16(%esi),%ecx subl %eax,%ecx movl %ecx,16(%ebx) jnc L050pw_tail_nc4 decl %ebp jz L032pw_end - # dl>0 Tail Round 5 + # dl>0 Tail Round 5 movl 20(%esi),%ecx subl %eax,%ecx movl %ecx,20(%ebx) jnc L051pw_tail_nc5 decl %ebp jz L032pw_end - # dl>0 Tail Round 6 + # dl>0 Tail Round 6 movl 24(%esi),%ecx subl %eax,%ecx movl %ecx,24(%ebx) diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/co-586.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/co-586.s index e47c520da977c4..be96fbe5cc9ac5 100644 --- 
a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/co-586.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/co-586.s @@ -13,9 +13,9 @@ L_bn_mul_comba8_begin: movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx - # ################## Calculate word 0 + # ################## Calculate word 0 xorl %ebp,%ebp - # mul a[0]*b[0] + # mul a[0]*b[0] mull %edx addl %eax,%ebx movl 20(%esp),%eax @@ -24,17 +24,17 @@ L_bn_mul_comba8_begin: adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax - # saved r[0] - # ################## Calculate word 1 + # saved r[0] + # ################## Calculate word 1 xorl %ebx,%ebx - # mul a[1]*b[0] + # mul a[1]*b[0] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx - # mul a[0]*b[1] + # mul a[0]*b[1] mull %edx addl %eax,%ecx movl 20(%esp),%eax @@ -43,24 +43,24 @@ L_bn_mul_comba8_begin: adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax - # saved r[1] - # ################## Calculate word 2 + # saved r[1] + # ################## Calculate word 2 xorl %ecx,%ecx - # mul a[2]*b[0] + # mul a[2]*b[0] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx - # mul a[1]*b[1] + # mul a[1]*b[1] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx - # mul a[0]*b[2] + # mul a[0]*b[2] mull %edx addl %eax,%ebp movl 20(%esp),%eax @@ -69,31 +69,31 @@ L_bn_mul_comba8_begin: adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax - # saved r[2] - # ################## Calculate word 3 + # saved r[2] + # ################## Calculate word 3 xorl %ebp,%ebp - # mul a[3]*b[0] + # mul a[3]*b[0] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp - # mul a[2]*b[1] + # mul a[2]*b[1] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp - # mul a[1]*b[2] + # mul a[1]*b[2] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp - # mul a[0]*b[3] + # mul a[0]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax @@ -102,38 +102,38 @@ L_bn_mul_comba8_begin: adcl $0,%ebp movl %ebx,12(%eax) movl 16(%esi),%eax - # saved r[3] - # ################## Calculate word 4 + # saved r[3] + # ################## Calculate word 4 xorl %ebx,%ebx - # mul a[4]*b[0] + # mul a[4]*b[0] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx - # mul a[3]*b[1] + # mul a[3]*b[1] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx - # mul a[2]*b[2] + # mul a[2]*b[2] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx - # mul a[1]*b[3] + # mul a[1]*b[3] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx - # mul a[0]*b[4] + # mul a[0]*b[4] mull %edx addl %eax,%ecx movl 20(%esp),%eax @@ -142,45 +142,45 @@ L_bn_mul_comba8_begin: adcl $0,%ebx movl %ecx,16(%eax) movl 20(%esi),%eax - # saved r[4] - # ################## Calculate word 5 + # saved r[4] + # ################## Calculate word 5 xorl %ecx,%ecx - # mul a[5]*b[0] + # mul a[5]*b[0] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx - # mul a[4]*b[1] + # mul a[4]*b[1] mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx - # mul a[3]*b[2] + # mul a[3]*b[2] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx - # mul a[2]*b[3] + # mul a[2]*b[3] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl 
$0,%ecx - # mul a[1]*b[4] + # mul a[1]*b[4] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx - # mul a[0]*b[5] + # mul a[0]*b[5] mull %edx addl %eax,%ebp movl 20(%esp),%eax @@ -189,52 +189,52 @@ L_bn_mul_comba8_begin: adcl $0,%ecx movl %ebp,20(%eax) movl 24(%esi),%eax - # saved r[5] - # ################## Calculate word 6 + # saved r[5] + # ################## Calculate word 6 xorl %ebp,%ebp - # mul a[6]*b[0] + # mul a[6]*b[0] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp - # mul a[5]*b[1] + # mul a[5]*b[1] mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp - # mul a[4]*b[2] + # mul a[4]*b[2] mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp - # mul a[3]*b[3] + # mul a[3]*b[3] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp - # mul a[2]*b[4] + # mul a[2]*b[4] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp - # mul a[1]*b[5] + # mul a[1]*b[5] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp - # mul a[0]*b[6] + # mul a[0]*b[6] mull %edx addl %eax,%ebx movl 20(%esp),%eax @@ -243,59 +243,59 @@ L_bn_mul_comba8_begin: adcl $0,%ebp movl %ebx,24(%eax) movl 28(%esi),%eax - # saved r[6] - # ################## Calculate word 7 + # saved r[6] + # ################## Calculate word 7 xorl %ebx,%ebx - # mul a[7]*b[0] + # mul a[7]*b[0] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx - # mul a[6]*b[1] + # mul a[6]*b[1] mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx - # mul a[5]*b[2] + # mul a[5]*b[2] mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx - # mul a[4]*b[3] + # mul a[4]*b[3] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx - # mul a[3]*b[4] + # mul a[3]*b[4] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx - # mul a[2]*b[5] + # mul a[2]*b[5] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx - # mul a[1]*b[6] + # mul a[1]*b[6] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx - # mul a[0]*b[7] + # mul a[0]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax @@ -304,52 +304,52 @@ L_bn_mul_comba8_begin: adcl $0,%ebx movl %ecx,28(%eax) movl 28(%esi),%eax - # saved r[7] - # ################## Calculate word 8 + # saved r[7] + # ################## Calculate word 8 xorl %ecx,%ecx - # mul a[7]*b[1] + # mul a[7]*b[1] mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx - # mul a[6]*b[2] + # mul a[6]*b[2] mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx - # mul a[5]*b[3] + # mul a[5]*b[3] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx - # mul a[4]*b[4] + # mul a[4]*b[4] mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx - # mul a[3]*b[5] + # mul a[3]*b[5] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx - # mul a[2]*b[6] + # mul a[2]*b[6] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx - # mul a[1]*b[7] + # mul a[1]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax @@ -358,45 +358,45 @@ 
L_bn_mul_comba8_begin: adcl $0,%ecx movl %ebp,32(%eax) movl 28(%esi),%eax - # saved r[8] - # ################## Calculate word 9 + # saved r[8] + # ################## Calculate word 9 xorl %ebp,%ebp - # mul a[7]*b[2] + # mul a[7]*b[2] mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp - # mul a[6]*b[3] + # mul a[6]*b[3] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp - # mul a[5]*b[4] + # mul a[5]*b[4] mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp - # mul a[4]*b[5] + # mul a[4]*b[5] mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp - # mul a[3]*b[6] + # mul a[3]*b[6] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp - # mul a[2]*b[7] + # mul a[2]*b[7] mull %edx addl %eax,%ebx movl 20(%esp),%eax @@ -405,38 +405,38 @@ L_bn_mul_comba8_begin: adcl $0,%ebp movl %ebx,36(%eax) movl 28(%esi),%eax - # saved r[9] - # ################## Calculate word 10 + # saved r[9] + # ################## Calculate word 10 xorl %ebx,%ebx - # mul a[7]*b[3] + # mul a[7]*b[3] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx - # mul a[6]*b[4] + # mul a[6]*b[4] mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx - # mul a[5]*b[5] + # mul a[5]*b[5] mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx - # mul a[4]*b[6] + # mul a[4]*b[6] mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx - # mul a[3]*b[7] + # mul a[3]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax @@ -445,31 +445,31 @@ L_bn_mul_comba8_begin: adcl $0,%ebx movl %ecx,40(%eax) movl 28(%esi),%eax - # saved r[10] - # ################## Calculate word 11 + # saved r[10] + # ################## Calculate word 11 xorl %ecx,%ecx - # mul a[7]*b[4] + # mul a[7]*b[4] mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx - # mul a[6]*b[5] + # mul a[6]*b[5] mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx - # mul a[5]*b[6] + # mul a[5]*b[6] mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx - # mul a[4]*b[7] + # mul a[4]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax @@ -478,24 +478,24 @@ L_bn_mul_comba8_begin: adcl $0,%ecx movl %ebp,44(%eax) movl 28(%esi),%eax - # saved r[11] - # ################## Calculate word 12 + # saved r[11] + # ################## Calculate word 12 xorl %ebp,%ebp - # mul a[7]*b[5] + # mul a[7]*b[5] mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp - # mul a[6]*b[6] + # mul a[6]*b[6] mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp - # mul a[5]*b[7] + # mul a[5]*b[7] mull %edx addl %eax,%ebx movl 20(%esp),%eax @@ -504,17 +504,17 @@ L_bn_mul_comba8_begin: adcl $0,%ebp movl %ebx,48(%eax) movl 28(%esi),%eax - # saved r[12] - # ################## Calculate word 13 + # saved r[12] + # ################## Calculate word 13 xorl %ebx,%ebx - # mul a[7]*b[6] + # mul a[7]*b[6] mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx - # mul a[6]*b[7] + # mul a[6]*b[7] mull %edx addl %eax,%ecx movl 20(%esp),%eax @@ -523,18 +523,18 @@ L_bn_mul_comba8_begin: adcl $0,%ebx movl %ecx,52(%eax) movl 28(%esi),%eax - # saved r[13] - # ################## Calculate word 14 
+ # saved r[13] + # ################## Calculate word 14 xorl %ecx,%ecx - # mul a[7]*b[7] + # mul a[7]*b[7] mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%eax) - # saved r[14] - # save r[15] + # saved r[14] + # save r[15] movl %ebx,60(%eax) popl %ebx popl %ebp @@ -555,9 +555,9 @@ L_bn_mul_comba4_begin: movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx - # ################## Calculate word 0 + # ################## Calculate word 0 xorl %ebp,%ebp - # mul a[0]*b[0] + # mul a[0]*b[0] mull %edx addl %eax,%ebx movl 20(%esp),%eax @@ -566,17 +566,17 @@ L_bn_mul_comba4_begin: adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax - # saved r[0] - # ################## Calculate word 1 + # saved r[0] + # ################## Calculate word 1 xorl %ebx,%ebx - # mul a[1]*b[0] + # mul a[1]*b[0] mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx - # mul a[0]*b[1] + # mul a[0]*b[1] mull %edx addl %eax,%ecx movl 20(%esp),%eax @@ -585,24 +585,24 @@ L_bn_mul_comba4_begin: adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax - # saved r[1] - # ################## Calculate word 2 + # saved r[1] + # ################## Calculate word 2 xorl %ecx,%ecx - # mul a[2]*b[0] + # mul a[2]*b[0] mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx - # mul a[1]*b[1] + # mul a[1]*b[1] mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx - # mul a[0]*b[2] + # mul a[0]*b[2] mull %edx addl %eax,%ebp movl 20(%esp),%eax @@ -611,31 +611,31 @@ L_bn_mul_comba4_begin: adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax - # saved r[2] - # ################## Calculate word 3 + # saved r[2] + # ################## Calculate word 3 xorl %ebp,%ebp - # mul a[3]*b[0] + # mul a[3]*b[0] mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp - # mul a[2]*b[1] + # mul a[2]*b[1] mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp - # mul a[1]*b[2] + # mul a[1]*b[2] mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp - # mul a[0]*b[3] + # mul a[0]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax @@ -644,24 +644,24 @@ L_bn_mul_comba4_begin: adcl $0,%ebp movl %ebx,12(%eax) movl 12(%esi),%eax - # saved r[3] - # ################## Calculate word 4 + # saved r[3] + # ################## Calculate word 4 xorl %ebx,%ebx - # mul a[3]*b[1] + # mul a[3]*b[1] mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx - # mul a[2]*b[2] + # mul a[2]*b[2] mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx - # mul a[1]*b[3] + # mul a[1]*b[3] mull %edx addl %eax,%ecx movl 20(%esp),%eax @@ -670,17 +670,17 @@ L_bn_mul_comba4_begin: adcl $0,%ebx movl %ecx,16(%eax) movl 12(%esi),%eax - # saved r[4] - # ################## Calculate word 5 + # saved r[4] + # ################## Calculate word 5 xorl %ecx,%ecx - # mul a[3]*b[2] + # mul a[3]*b[2] mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx - # mul a[2]*b[3] + # mul a[2]*b[3] mull %edx addl %eax,%ebp movl 20(%esp),%eax @@ -689,18 +689,18 @@ L_bn_mul_comba4_begin: adcl $0,%ecx movl %ebp,20(%eax) movl 12(%esi),%eax - # saved r[5] - # ################## Calculate word 6 + # saved r[5] + # ################## Calculate word 6 xorl %ebp,%ebp - # mul a[3]*b[3] + # mul a[3]*b[3] mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%eax) - # saved r[6] 
- # save r[7] + # saved r[6] + # save r[7] movl %ecx,28(%eax) popl %ebx popl %ebp @@ -720,9 +720,9 @@ L_bn_sqr_comba8_begin: xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax - # ############### Calculate word 0 + # ############### Calculate word 0 xorl %ebp,%ebp - # sqr a[0]*a[0] + # sqr a[0]*a[0] mull %eax addl %eax,%ebx adcl %edx,%ecx @@ -730,10 +730,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax - # saved r[0] - # ############### Calculate word 1 + # saved r[0] + # ############### Calculate word 1 xorl %ebx,%ebx - # sqr a[1]*a[0] + # sqr a[1]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -744,10 +744,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx - # saved r[1] - # ############### Calculate word 2 + # saved r[1] + # ############### Calculate word 2 xorl %ecx,%ecx - # sqr a[2]*a[0] + # sqr a[2]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -756,7 +756,7 @@ L_bn_sqr_comba8_begin: adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx - # sqr a[1]*a[1] + # sqr a[1]*a[1] mull %eax addl %eax,%ebp adcl %edx,%ebx @@ -764,10 +764,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax - # saved r[2] - # ############### Calculate word 3 + # saved r[2] + # ############### Calculate word 3 xorl %ebp,%ebp - # sqr a[3]*a[0] + # sqr a[3]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -777,7 +777,7 @@ L_bn_sqr_comba8_begin: movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx - # sqr a[2]*a[1] + # sqr a[2]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -788,10 +788,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebp movl %ebx,12(%edi) movl (%esi),%edx - # saved r[3] - # ############### Calculate word 4 + # saved r[3] + # ############### Calculate word 4 xorl %ebx,%ebx - # sqr a[4]*a[0] + # sqr a[4]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -801,7 +801,7 @@ L_bn_sqr_comba8_begin: movl 12(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx - # sqr a[3]*a[1] + # sqr a[3]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -810,7 +810,7 @@ L_bn_sqr_comba8_begin: adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx - # sqr a[2]*a[2] + # sqr a[2]*a[2] mull %eax addl %eax,%ecx adcl %edx,%ebp @@ -818,10 +818,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebx movl %ecx,16(%edi) movl 20(%esi),%eax - # saved r[4] - # ############### Calculate word 5 + # saved r[4] + # ############### Calculate word 5 xorl %ecx,%ecx - # sqr a[5]*a[0] + # sqr a[5]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -831,7 +831,7 @@ L_bn_sqr_comba8_begin: movl 16(%esi),%eax adcl $0,%ecx movl 4(%esi),%edx - # sqr a[4]*a[1] + # sqr a[4]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -841,7 +841,7 @@ L_bn_sqr_comba8_begin: movl 12(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx - # sqr a[3]*a[2] + # sqr a[3]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx @@ -852,10 +852,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ecx movl %ebp,20(%edi) movl (%esi),%edx - # saved r[5] - # ############### Calculate word 6 + # saved r[5] + # ############### Calculate word 6 xorl %ebp,%ebp - # sqr a[6]*a[0] + # sqr a[6]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -865,7 +865,7 @@ L_bn_sqr_comba8_begin: movl 20(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx - # sqr a[5]*a[1] + # sqr a[5]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -875,7 +875,7 @@ L_bn_sqr_comba8_begin: movl 16(%esi),%eax adcl $0,%ebp movl 8(%esi),%edx - # sqr a[4]*a[2] + # sqr a[4]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx @@ -884,7 +884,7 @@ L_bn_sqr_comba8_begin: adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp - # sqr a[3]*a[3] + # sqr a[3]*a[3] mull %eax 
addl %eax,%ebx adcl %edx,%ecx @@ -892,10 +892,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebp movl %ebx,24(%edi) movl 28(%esi),%eax - # saved r[6] - # ############### Calculate word 7 + # saved r[6] + # ############### Calculate word 7 xorl %ebx,%ebx - # sqr a[7]*a[0] + # sqr a[7]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -905,7 +905,7 @@ L_bn_sqr_comba8_begin: movl 24(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx - # sqr a[6]*a[1] + # sqr a[6]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -915,7 +915,7 @@ L_bn_sqr_comba8_begin: movl 20(%esi),%eax adcl $0,%ebx movl 8(%esi),%edx - # sqr a[5]*a[2] + # sqr a[5]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx @@ -925,7 +925,7 @@ L_bn_sqr_comba8_begin: movl 16(%esi),%eax adcl $0,%ebx movl 12(%esi),%edx - # sqr a[4]*a[3] + # sqr a[4]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx @@ -936,10 +936,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebx movl %ecx,28(%edi) movl 4(%esi),%edx - # saved r[7] - # ############### Calculate word 8 + # saved r[7] + # ############### Calculate word 8 xorl %ecx,%ecx - # sqr a[7]*a[1] + # sqr a[7]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -949,7 +949,7 @@ L_bn_sqr_comba8_begin: movl 24(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx - # sqr a[6]*a[2] + # sqr a[6]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx @@ -959,7 +959,7 @@ L_bn_sqr_comba8_begin: movl 20(%esi),%eax adcl $0,%ecx movl 12(%esi),%edx - # sqr a[5]*a[3] + # sqr a[5]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx @@ -968,7 +968,7 @@ L_bn_sqr_comba8_begin: adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx - # sqr a[4]*a[4] + # sqr a[4]*a[4] mull %eax addl %eax,%ebp adcl %edx,%ebx @@ -976,10 +976,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ecx movl %ebp,32(%edi) movl 28(%esi),%eax - # saved r[8] - # ############### Calculate word 9 + # saved r[8] + # ############### Calculate word 9 xorl %ebp,%ebp - # sqr a[7]*a[2] + # sqr a[7]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx @@ -989,7 +989,7 @@ L_bn_sqr_comba8_begin: movl 24(%esi),%eax adcl $0,%ebp movl 12(%esi),%edx - # sqr a[6]*a[3] + # sqr a[6]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx @@ -999,7 +999,7 @@ L_bn_sqr_comba8_begin: movl 20(%esi),%eax adcl $0,%ebp movl 16(%esi),%edx - # sqr a[5]*a[4] + # sqr a[5]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1010,10 +1010,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebp movl %ebx,36(%edi) movl 12(%esi),%edx - # saved r[9] - # ############### Calculate word 10 + # saved r[9] + # ############### Calculate word 10 xorl %ebx,%ebx - # sqr a[7]*a[3] + # sqr a[7]*a[3] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1023,7 +1023,7 @@ L_bn_sqr_comba8_begin: movl 24(%esi),%eax adcl $0,%ebx movl 16(%esi),%edx - # sqr a[6]*a[4] + # sqr a[6]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1032,7 +1032,7 @@ L_bn_sqr_comba8_begin: adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx - # sqr a[5]*a[5] + # sqr a[5]*a[5] mull %eax addl %eax,%ecx adcl %edx,%ebp @@ -1040,10 +1040,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebx movl %ecx,40(%edi) movl 28(%esi),%eax - # saved r[10] - # ############### Calculate word 11 + # saved r[10] + # ############### Calculate word 11 xorl %ecx,%ecx - # sqr a[7]*a[4] + # sqr a[7]*a[4] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1053,7 +1053,7 @@ L_bn_sqr_comba8_begin: movl 24(%esi),%eax adcl $0,%ecx movl 20(%esi),%edx - # sqr a[6]*a[5] + # sqr a[6]*a[5] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1064,10 +1064,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ecx movl %ebp,44(%edi) movl 20(%esi),%edx - # saved r[11] - # ############### Calculate word 12 + # saved r[11] + # ############### 
Calculate word 12 xorl %ebp,%ebp - # sqr a[7]*a[5] + # sqr a[7]*a[5] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1076,7 +1076,7 @@ L_bn_sqr_comba8_begin: adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp - # sqr a[6]*a[6] + # sqr a[6]*a[6] mull %eax addl %eax,%ebx adcl %edx,%ecx @@ -1084,10 +1084,10 @@ L_bn_sqr_comba8_begin: adcl $0,%ebp movl %ebx,48(%edi) movl 28(%esi),%eax - # saved r[12] - # ############### Calculate word 13 + # saved r[12] + # ############### Calculate word 13 xorl %ebx,%ebx - # sqr a[7]*a[6] + # sqr a[7]*a[6] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1097,16 +1097,16 @@ L_bn_sqr_comba8_begin: movl 28(%esi),%eax adcl $0,%ebx movl %ecx,52(%edi) - # saved r[13] - # ############### Calculate word 14 + # saved r[13] + # ############### Calculate word 14 xorl %ecx,%ecx - # sqr a[7]*a[7] + # sqr a[7]*a[7] mull %eax addl %eax,%ebp adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%edi) - # saved r[14] + # saved r[14] movl %ebx,60(%edi) popl %ebx popl %ebp @@ -1126,9 +1126,9 @@ L_bn_sqr_comba4_begin: xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax - # ############### Calculate word 0 + # ############### Calculate word 0 xorl %ebp,%ebp - # sqr a[0]*a[0] + # sqr a[0]*a[0] mull %eax addl %eax,%ebx adcl %edx,%ecx @@ -1136,10 +1136,10 @@ L_bn_sqr_comba4_begin: adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax - # saved r[0] - # ############### Calculate word 1 + # saved r[0] + # ############### Calculate word 1 xorl %ebx,%ebx - # sqr a[1]*a[0] + # sqr a[1]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1150,10 +1150,10 @@ L_bn_sqr_comba4_begin: adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx - # saved r[1] - # ############### Calculate word 2 + # saved r[1] + # ############### Calculate word 2 xorl %ecx,%ecx - # sqr a[2]*a[0] + # sqr a[2]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1162,7 +1162,7 @@ L_bn_sqr_comba4_begin: adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx - # sqr a[1]*a[1] + # sqr a[1]*a[1] mull %eax addl %eax,%ebp adcl %edx,%ebx @@ -1170,10 +1170,10 @@ L_bn_sqr_comba4_begin: adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax - # saved r[2] - # ############### Calculate word 3 + # saved r[2] + # ############### Calculate word 3 xorl %ebp,%ebp - # sqr a[3]*a[0] + # sqr a[3]*a[0] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1183,7 +1183,7 @@ L_bn_sqr_comba4_begin: movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx - # sqr a[2]*a[1] + # sqr a[2]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1194,10 +1194,10 @@ L_bn_sqr_comba4_begin: adcl $0,%ebp movl %ebx,12(%edi) movl 4(%esi),%edx - # saved r[3] - # ############### Calculate word 4 + # saved r[3] + # ############### Calculate word 4 xorl %ebx,%ebx - # sqr a[3]*a[1] + # sqr a[3]*a[1] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1206,7 +1206,7 @@ L_bn_sqr_comba4_begin: adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx - # sqr a[2]*a[2] + # sqr a[2]*a[2] mull %eax addl %eax,%ecx adcl %edx,%ebp @@ -1214,10 +1214,10 @@ L_bn_sqr_comba4_begin: adcl $0,%ebx movl %ecx,16(%edi) movl 12(%esi),%eax - # saved r[4] - # ############### Calculate word 5 + # saved r[4] + # ############### Calculate word 5 xorl %ecx,%ecx - # sqr a[3]*a[2] + # sqr a[3]*a[2] mull %edx addl %eax,%eax adcl %edx,%edx @@ -1227,16 +1227,16 @@ L_bn_sqr_comba4_begin: movl 12(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) - # saved r[5] - # ############### Calculate word 6 + # saved r[5] + # ############### Calculate word 6 xorl %ebp,%ebp - # sqr a[3]*a[3] + # sqr a[3]*a[3] mull %eax addl %eax,%ebx adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%edi) - # saved r[6] + # saved r[6] movl 
%ecx,28(%edi) popl %ebx popl %ebp diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/rsaz-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/rsaz-x86_64.s index 7876e0b8f93d9c..d5025b23cd668e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/rsaz-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/rsaz-x86_64.s @@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one: .type __rsaz_512_reduce,@function .align 32 __rsaz_512_reduce: -.cfi_startproc +.cfi_startproc movq %r8,%rbx imulq 128+8(%rsp),%rbx movq 0(%rbp),%rax @@ -1533,12 +1533,12 @@ __rsaz_512_reduce: jne .Lreduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reduce,.-__rsaz_512_reduce .type __rsaz_512_reducex,@function .align 32 __rsaz_512_reducex: -.cfi_startproc +.cfi_startproc imulq %r8,%rdx xorq %rsi,%rsi @@ -1591,12 +1591,12 @@ __rsaz_512_reducex: jne .Lreduction_loopx .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reducex,.-__rsaz_512_reducex .type __rsaz_512_subtract,@function .align 32 __rsaz_512_subtract: -.cfi_startproc +.cfi_startproc movq %r8,(%rdi) movq %r9,8(%rdi) movq %r10,16(%rdi) @@ -1650,12 +1650,12 @@ __rsaz_512_subtract: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_subtract,.-__rsaz_512_subtract .type __rsaz_512_mul,@function .align 32 __rsaz_512_mul: -.cfi_startproc +.cfi_startproc leaq 8(%rsp),%rdi movq (%rsi),%rax @@ -1794,12 +1794,12 @@ __rsaz_512_mul: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mul,.-__rsaz_512_mul .type __rsaz_512_mulx,@function .align 32 __rsaz_512_mulx: -.cfi_startproc +.cfi_startproc mulxq (%rsi),%rbx,%r8 movq $-6,%rcx @@ -1916,13 +1916,13 @@ __rsaz_512_mulx: movq %r15,8+64+56(%rsp) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mulx,.-__rsaz_512_mulx .globl rsaz_512_scatter4 .type rsaz_512_scatter4,@function .align 16 rsaz_512_scatter4: -.cfi_startproc +.cfi_startproc leaq (%rdi,%rdx,8),%rdi movl $8,%r9d jmp .Loop_scatter @@ -1935,14 +1935,14 @@ rsaz_512_scatter4: decl %r9d jnz .Loop_scatter .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size rsaz_512_scatter4,.-rsaz_512_scatter4 .globl rsaz_512_gather4 .type rsaz_512_gather4,@function .align 16 rsaz_512_gather4: -.cfi_startproc +.cfi_startproc movd %edx,%xmm8 movdqa .Linc+16(%rip),%xmm1 movdqa .Linc(%rip),%xmm0 @@ -2006,7 +2006,7 @@ rsaz_512_gather4: jnz .Loop_gather .byte 0xf3,0xc3 .LSEH_end_rsaz_512_gather4: -.cfi_endproc +.cfi_endproc .size rsaz_512_gather4,.-rsaz_512_gather4 .align 64 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/x86_64-mont5.s index 40a60a3c8fc6b9..ab93b02d8c1aae 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/x86_64-mont5.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/bn/x86_64-mont5.s @@ -550,7 +550,7 @@ bn_mul4x_mont_gather5: .type mul4x_internal,@function .align 32 mul4x_internal: -.cfi_startproc +.cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq .Linc(%rip),%rax @@ -1072,7 +1072,7 @@ mul4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry -.cfi_endproc +.cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5 .type bn_power5,@function @@ -1215,7 +1215,7 @@ bn_power5: .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: -.cfi_startproc +.cfi_startproc @@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction: cmpq %rdx,%rdi jb .L8x_reduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size 
bn_sqr8x_internal,.-bn_sqr8x_internal .type __bn_post4x_internal,@function .align 32 __bn_post4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx @@ -2046,18 +2046,18 @@ __bn_post4x_internal: movq %r9,%r10 negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_from_montgomery .type bn_from_montgomery,@function .align 32 bn_from_montgomery: -.cfi_startproc +.cfi_startproc testl $7,%r9d jz bn_from_mont8x xorl %eax,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_from_montgomery,.-bn_from_montgomery .type bn_from_mont8x,@function @@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5: .type mulx4x_internal,@function .align 32 mulx4x_internal: -.cfi_startproc +.cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 @@ -2760,7 +2760,7 @@ mulx4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry -.cfi_endproc +.cfi_endproc .size mulx4x_internal,.-mulx4x_internal .type bn_powerx5,@function .align 32 @@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction: .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 __bn_postx4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 @@ -3567,13 +3567,13 @@ __bn_postx4x_internal: negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_get_bits5 .type bn_get_bits5,@function .align 16 bn_get_bits5: -.cfi_startproc +.cfi_startproc leaq 0(%rdi),%r10 leaq 1(%rdi),%r11 movl %esi,%ecx @@ -3587,14 +3587,14 @@ bn_get_bits5: shrl %cl,%eax andl $31,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_get_bits5,.-bn_get_bits5 .globl bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: -.cfi_startproc +.cfi_startproc cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx @@ -3607,7 +3607,7 @@ bn_scatter5: jnz .Lscatter .Lscatter_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 @@ -3615,7 +3615,7 @@ bn_scatter5: .align 32 bn_gather5: .LSEH_begin_bn_gather5: -.cfi_startproc +.cfi_startproc .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 @@ -3773,7 +3773,7 @@ bn_gather5: leaq (%r10),%rsp .byte 0xf3,0xc3 .LSEH_end_bn_gather5: -.cfi_endproc +.cfi_endproc .size bn_gather5,.-bn_gather5 .align 64 .Linc: diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/buildinf.h b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/buildinf.h index 451c7db71f8dc8..10979e3aa32bb0 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: BSD-x86_64" -#define DATE "built on: Mon Mar 23 17:49:55 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:09 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/camellia/cmll-x86_64.s index eeb20dd2291da7..92056f8b1e0e0e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/camellia/cmll-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/camellia/cmll-x86_64.s @@ -5,13 +5,13 @@ .type Camellia_EncryptBlock,@function .align 16 Camellia_EncryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Lenc_rounds -.cfi_endproc +.cfi_endproc .size Camellia_EncryptBlock,.-Camellia_EncryptBlock .globl Camellia_EncryptBlock_Rounds @@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds: .type _x86_64_Camellia_encrypt,@function .align 16 _x86_64_Camellia_encrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt: movl %edx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt @@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt: .type Camellia_DecryptBlock,@function .align 16 Camellia_DecryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Ldec_rounds -.cfi_endproc +.cfi_endproc .size Camellia_DecryptBlock,.-Camellia_DecryptBlock .globl Camellia_DecryptBlock_Rounds @@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds: .type _x86_64_Camellia_decrypt,@function .align 16 _x86_64_Camellia_decrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt: movl %ebx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt .globl Camellia_Ekeygen .type Camellia_Ekeygen,@function diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/crypt586.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/crypt586.s index fa49f0e642c8c4..5dd6d612dc325d 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/crypt586.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/crypt586.s @@ -8,7 +8,7 @@ L_fcrypt_body_begin: pushl %esi pushl %edi - # Load the 2 words + # Load the 2 words xorl %edi,%edi xorl %esi,%esi call L000PIC_me_up @@ -20,7 +20,7 @@ L000PIC_me_up: pushl $25 L001start: - # Round 0 + # Round 0 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -70,7 +70,7 @@ L001start: xorl %ebx,%edi movl 32(%esp),%ebp - # Round 1 + # Round 1 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -120,7 +120,7 @@ L001start: xorl %ebx,%esi movl 32(%esp),%ebp - # Round 2 + # Round 2 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -170,7 +170,7 @@ L001start: xorl %ebx,%edi movl 32(%esp),%ebp - # Round 3 + # Round 3 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -220,7 +220,7 @@ L001start: xorl %ebx,%esi movl 32(%esp),%ebp - # Round 4 + # Round 4 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -270,7 +270,7 @@ L001start: xorl %ebx,%edi movl 32(%esp),%ebp - # Round 5 + # Round 5 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -320,7 +320,7 @@ L001start: xorl %ebx,%esi movl 32(%esp),%ebp - # Round 6 + # Round 6 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -370,7 +370,7 @@ L001start: xorl %ebx,%edi movl 32(%esp),%ebp - # Round 7 + # Round 7 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -420,7 +420,7 @@ L001start: xorl %ebx,%esi movl 32(%esp),%ebp - # Round 8 + # Round 8 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -470,7 +470,7 @@ L001start: xorl 
%ebx,%edi movl 32(%esp),%ebp - # Round 9 + # Round 9 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -520,7 +520,7 @@ L001start: xorl %ebx,%esi movl 32(%esp),%ebp - # Round 10 + # Round 10 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -570,7 +570,7 @@ L001start: xorl %ebx,%edi movl 32(%esp),%ebp - # Round 11 + # Round 11 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -620,7 +620,7 @@ L001start: xorl %ebx,%esi movl 32(%esp),%ebp - # Round 12 + # Round 12 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -670,7 +670,7 @@ L001start: xorl %ebx,%edi movl 32(%esp),%ebp - # Round 13 + # Round 13 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -720,7 +720,7 @@ L001start: xorl %ebx,%esi movl 32(%esp),%ebp - # Round 14 + # Round 14 movl 36(%esp),%eax movl %esi,%edx shrl $16,%edx @@ -770,7 +770,7 @@ L001start: xorl %ebx,%edi movl 32(%esp),%ebp - # Round 15 + # Round 15 movl 36(%esp),%eax movl %edi,%edx shrl $16,%edx @@ -827,7 +827,7 @@ L001start: movl %ebx,(%esp) jnz L001start - # FP + # FP movl 28(%esp),%edx rorl $1,%edi movl %esi,%eax diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/des-586.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/des-586.s index 56185ad4db7e2e..f9ec4dd2f8d822 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/des-586.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/des/des-586.s @@ -3,7 +3,7 @@ .align 4 __x86_DES_encrypt: pushl %ecx - # Round 0 + # Round 0 movl (%ecx),%eax xorl %ebx,%ebx movl 4(%ecx),%edx @@ -32,7 +32,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 1 + # Round 1 movl 8(%ecx),%eax xorl %ebx,%ebx movl 12(%ecx),%edx @@ -61,7 +61,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 2 + # Round 2 movl 16(%ecx),%eax xorl %ebx,%ebx movl 20(%ecx),%edx @@ -90,7 +90,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 3 + # Round 3 movl 24(%ecx),%eax xorl %ebx,%ebx movl 28(%ecx),%edx @@ -119,7 +119,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 4 + # Round 4 movl 32(%ecx),%eax xorl %ebx,%ebx movl 36(%ecx),%edx @@ -148,7 +148,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 5 + # Round 5 movl 40(%ecx),%eax xorl %ebx,%ebx movl 44(%ecx),%edx @@ -177,7 +177,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 6 + # Round 6 movl 48(%ecx),%eax xorl %ebx,%ebx movl 52(%ecx),%edx @@ -206,7 +206,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 7 + # Round 7 movl 56(%ecx),%eax xorl %ebx,%ebx movl 60(%ecx),%edx @@ -235,7 +235,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 8 + # Round 8 movl 64(%ecx),%eax xorl %ebx,%ebx movl 68(%ecx),%edx @@ -264,7 +264,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 9 + # Round 9 movl 72(%ecx),%eax xorl %ebx,%ebx movl 76(%ecx),%edx @@ -293,7 +293,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 10 + # Round 10 movl 80(%ecx),%eax xorl %ebx,%ebx movl 84(%ecx),%edx @@ -322,7 +322,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 11 + # Round 11 
movl 88(%ecx),%eax xorl %ebx,%ebx movl 92(%ecx),%edx @@ -351,7 +351,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 12 + # Round 12 movl 96(%ecx),%eax xorl %ebx,%ebx movl 100(%ecx),%edx @@ -380,7 +380,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 13 + # Round 13 movl 104(%ecx),%eax xorl %ebx,%ebx movl 108(%ecx),%edx @@ -409,7 +409,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 14 + # Round 14 movl 112(%ecx),%eax xorl %ebx,%ebx movl 116(%ecx),%edx @@ -438,7 +438,7 @@ __x86_DES_encrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 15 + # Round 15 movl 120(%ecx),%eax xorl %ebx,%ebx movl 124(%ecx),%edx @@ -472,7 +472,7 @@ __x86_DES_encrypt: .align 4 __x86_DES_decrypt: pushl %ecx - # Round 15 + # Round 15 movl 120(%ecx),%eax xorl %ebx,%ebx movl 124(%ecx),%edx @@ -501,7 +501,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 14 + # Round 14 movl 112(%ecx),%eax xorl %ebx,%ebx movl 116(%ecx),%edx @@ -530,7 +530,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 13 + # Round 13 movl 104(%ecx),%eax xorl %ebx,%ebx movl 108(%ecx),%edx @@ -559,7 +559,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 12 + # Round 12 movl 96(%ecx),%eax xorl %ebx,%ebx movl 100(%ecx),%edx @@ -588,7 +588,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 11 + # Round 11 movl 88(%ecx),%eax xorl %ebx,%ebx movl 92(%ecx),%edx @@ -617,7 +617,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 10 + # Round 10 movl 80(%ecx),%eax xorl %ebx,%ebx movl 84(%ecx),%edx @@ -646,7 +646,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 9 + # Round 9 movl 72(%ecx),%eax xorl %ebx,%ebx movl 76(%ecx),%edx @@ -675,7 +675,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 8 + # Round 8 movl 64(%ecx),%eax xorl %ebx,%ebx movl 68(%ecx),%edx @@ -704,7 +704,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 7 + # Round 7 movl 56(%ecx),%eax xorl %ebx,%ebx movl 60(%ecx),%edx @@ -733,7 +733,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 6 + # Round 6 movl 48(%ecx),%eax xorl %ebx,%ebx movl 52(%ecx),%edx @@ -762,7 +762,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 5 + # Round 5 movl 40(%ecx),%eax xorl %ebx,%ebx movl 44(%ecx),%edx @@ -791,7 +791,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 4 + # Round 4 movl 32(%ecx),%eax xorl %ebx,%ebx movl 36(%ecx),%edx @@ -820,7 +820,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 3 + # Round 3 movl 24(%ecx),%eax xorl %ebx,%ebx movl 28(%ecx),%edx @@ -849,7 +849,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 2 + # Round 2 movl 16(%ecx),%eax xorl %ebx,%ebx movl 20(%ecx),%edx @@ -878,7 +878,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 
0x400(%ebp,%eax,1),%esi xorl 0x500(%ebp,%edx,1),%esi - # Round 1 + # Round 1 movl 8(%ecx),%eax xorl %ebx,%ebx movl 12(%ecx),%edx @@ -907,7 +907,7 @@ __x86_DES_decrypt: movl (%esp),%ecx xorl 0x400(%ebp,%eax,1),%edi xorl 0x500(%ebp,%edx,1),%edi - # Round 0 + # Round 0 movl (%ecx),%eax xorl %ebx,%ebx movl 4(%ecx),%edx @@ -945,7 +945,7 @@ L_DES_encrypt1_begin: pushl %esi pushl %edi - # Load the 2 words + # Load the 2 words movl 12(%esp),%esi xorl %ecx,%ecx pushl %ebx @@ -954,7 +954,7 @@ L_DES_encrypt1_begin: movl 28(%esp),%ebx movl 4(%esi),%edi - # IP + # IP roll $4,%eax movl %eax,%esi xorl %edi,%eax @@ -1004,7 +1004,7 @@ L001decrypt: call __x86_DES_decrypt L002done: - # FP + # FP movl 20(%esp),%edx rorl $1,%esi movl %edi,%eax @@ -1056,7 +1056,7 @@ L_DES_encrypt2_begin: pushl %esi pushl %edi - # Load the 2 words + # Load the 2 words movl 12(%esp),%eax xorl %ecx,%ecx pushl %ebx @@ -1079,7 +1079,7 @@ L004decrypt: call __x86_DES_decrypt L005done: - # Fixup + # Fixup rorl $3,%edi movl 20(%esp),%eax rorl $3,%esi @@ -1100,12 +1100,12 @@ L_DES_encrypt3_begin: pushl %esi pushl %edi - # Load the data words + # Load the data words movl (%ebx),%edi movl 4(%ebx),%esi subl $12,%esp - # IP + # IP roll $4,%edi movl %edi,%edx xorl %esi,%edi @@ -1164,7 +1164,7 @@ L_DES_encrypt3_begin: movl (%ebx),%edi movl 4(%ebx),%esi - # FP + # FP roll $2,%esi roll $3,%edi movl %edi,%eax @@ -1219,12 +1219,12 @@ L_DES_decrypt3_begin: pushl %esi pushl %edi - # Load the data words + # Load the data words movl (%ebx),%edi movl 4(%ebx),%esi subl $12,%esp - # IP + # IP roll $4,%edi movl %edi,%edx xorl %esi,%edi @@ -1283,7 +1283,7 @@ L_DES_decrypt3_begin: movl (%ebx),%edi movl 4(%ebx),%esi - # FP + # FP roll $2,%esi roll $3,%edi movl %edi,%eax @@ -1338,7 +1338,7 @@ L_DES_ncbc_encrypt_begin: pushl %esi pushl %edi movl 28(%esp),%ebp - # getting iv ptr from parameter 4 + # getting iv ptr from parameter 4 movl 36(%esp),%ebx movl (%ebx),%esi movl 4(%ebx),%edi @@ -1349,11 +1349,11 @@ L_DES_ncbc_encrypt_begin: movl %esp,%ebx movl 36(%esp),%esi movl 40(%esp),%edi - # getting encrypt flag from parameter 5 + # getting encrypt flag from parameter 5 movl 56(%esp),%ecx - # get and push parameter 5 + # get and push parameter 5 pushl %ecx - # get and push parameter 3 + # get and push parameter 3 movl 52(%esp),%eax pushl %eax pushl %ebx @@ -1516,7 +1516,7 @@ L_DES_ede3_cbc_encrypt_begin: pushl %esi pushl %edi movl 28(%esp),%ebp - # getting iv ptr from parameter 6 + # getting iv ptr from parameter 6 movl 44(%esp),%ebx movl (%ebx),%esi movl 4(%ebx),%edi @@ -1527,15 +1527,15 @@ L_DES_ede3_cbc_encrypt_begin: movl %esp,%ebx movl 36(%esp),%esi movl 40(%esp),%edi - # getting encrypt flag from parameter 7 + # getting encrypt flag from parameter 7 movl 64(%esp),%ecx - # get and push parameter 5 + # get and push parameter 5 movl 56(%esp),%eax pushl %eax - # get and push parameter 4 + # get and push parameter 4 movl 56(%esp),%eax pushl %eax - # get and push parameter 3 + # get and push parameter 3 movl 56(%esp),%eax pushl %eax pushl %ebx diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s index 5c9e4050416212..80569cae04667e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s @@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx: .type ecp_nistz256_to_mont,@function .align 32 ecp_nistz256_to_mont: -.cfi_startproc +.cfi_startproc movl 
$0x80100,%ecx andl OPENSSL_ia32cap_P+8(%rip),%ecx leaq .LRR(%rip),%rdx jmp .Lmul_mont -.cfi_endproc +.cfi_endproc .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont @@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont: .type ecp_nistz256_scatter_w5,@function .align 32 ecp_nistz256_scatter_w5: -.cfi_startproc +.cfi_startproc leal -3(%rdx,%rdx,2),%edx movdqa 0(%rsi),%xmm0 shll $5,%edx @@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5: movdqa %xmm5,80(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5 @@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5: .type ecp_nistz256_scatter_w7,@function .align 32 ecp_nistz256_scatter_w7: -.cfi_startproc +.cfi_startproc movdqu 0(%rsi),%xmm0 shll $6,%edx movdqu 16(%rsi),%xmm1 @@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7: movdqa %xmm3,48(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/x25519-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/x25519-x86_64.s index 1788e568cda5d2..8fd319c83c880d 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/x25519-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/ec/x25519-x86_64.s @@ -400,14 +400,14 @@ x25519_fe51_mul121666: .type x25519_fe64_eligible,@function .align 32 x25519_fe64_eligible: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+8(%rip),%ecx xorl %eax,%eax andl $0x80100,%ecx cmpl $0x80100,%ecx cmovel %ecx,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_eligible,.-x25519_fe64_eligible .globl x25519_fe64_mul @@ -650,7 +650,7 @@ x25519_fe64_sqr: .align 32 x25519_fe64_mul121666: .Lfe64_mul121666_body: -.cfi_startproc +.cfi_startproc movl $121666,%edx mulxq 0(%rsi),%r8,%rcx mulxq 8(%rsi),%r9,%rax @@ -679,7 +679,7 @@ x25519_fe64_mul121666: .Lfe64_mul121666_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_mul121666,.-x25519_fe64_mul121666 .globl x25519_fe64_add @@ -687,7 +687,7 @@ x25519_fe64_mul121666: .align 32 x25519_fe64_add: .Lfe64_add_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -716,7 +716,7 @@ x25519_fe64_add: .Lfe64_add_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_add,.-x25519_fe64_add .globl x25519_fe64_sub @@ -724,7 +724,7 @@ x25519_fe64_add: .align 32 x25519_fe64_sub: .Lfe64_sub_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -753,7 +753,7 @@ x25519_fe64_sub: .Lfe64_sub_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_sub,.-x25519_fe64_sub .globl x25519_fe64_tobytes @@ -761,7 +761,7 @@ x25519_fe64_sub: .align 32 x25519_fe64_tobytes: .Lfe64_to_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -797,6 +797,6 @@ x25519_fe64_tobytes: .Lfe64_to_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_tobytes,.-x25519_fe64_tobytes .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/md5/md5-586.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/md5/md5-586.s index a3c9ef0bb8f149..e2ab7ac36d4ab3 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/md5/md5-586.s +++ 
b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/md5/md5-586.s @@ -20,10 +20,10 @@ L_md5_block_asm_data_order_begin: movl 12(%edi),%edx L000start: - # R0 section + # R0 section movl %ecx,%edi movl (%esi),%ebp - # R0 0 + # R0 0 xorl %edx,%edi andl %ebx,%edi leal 3614090360(%eax,%ebp,1),%eax @@ -33,7 +33,7 @@ L000start: roll $7,%eax movl %ebx,%edi addl %ebx,%eax - # R0 1 + # R0 1 xorl %ecx,%edi andl %eax,%edi leal 3905402710(%edx,%ebp,1),%edx @@ -43,7 +43,7 @@ L000start: roll $12,%edx movl %eax,%edi addl %eax,%edx - # R0 2 + # R0 2 xorl %ebx,%edi andl %edx,%edi leal 606105819(%ecx,%ebp,1),%ecx @@ -53,7 +53,7 @@ L000start: roll $17,%ecx movl %edx,%edi addl %edx,%ecx - # R0 3 + # R0 3 xorl %eax,%edi andl %ecx,%edi leal 3250441966(%ebx,%ebp,1),%ebx @@ -63,7 +63,7 @@ L000start: roll $22,%ebx movl %ecx,%edi addl %ecx,%ebx - # R0 4 + # R0 4 xorl %edx,%edi andl %ebx,%edi leal 4118548399(%eax,%ebp,1),%eax @@ -73,7 +73,7 @@ L000start: roll $7,%eax movl %ebx,%edi addl %ebx,%eax - # R0 5 + # R0 5 xorl %ecx,%edi andl %eax,%edi leal 1200080426(%edx,%ebp,1),%edx @@ -83,7 +83,7 @@ L000start: roll $12,%edx movl %eax,%edi addl %eax,%edx - # R0 6 + # R0 6 xorl %ebx,%edi andl %edx,%edi leal 2821735955(%ecx,%ebp,1),%ecx @@ -93,7 +93,7 @@ L000start: roll $17,%ecx movl %edx,%edi addl %edx,%ecx - # R0 7 + # R0 7 xorl %eax,%edi andl %ecx,%edi leal 4249261313(%ebx,%ebp,1),%ebx @@ -103,7 +103,7 @@ L000start: roll $22,%ebx movl %ecx,%edi addl %ecx,%ebx - # R0 8 + # R0 8 xorl %edx,%edi andl %ebx,%edi leal 1770035416(%eax,%ebp,1),%eax @@ -113,7 +113,7 @@ L000start: roll $7,%eax movl %ebx,%edi addl %ebx,%eax - # R0 9 + # R0 9 xorl %ecx,%edi andl %eax,%edi leal 2336552879(%edx,%ebp,1),%edx @@ -123,7 +123,7 @@ L000start: roll $12,%edx movl %eax,%edi addl %eax,%edx - # R0 10 + # R0 10 xorl %ebx,%edi andl %edx,%edi leal 4294925233(%ecx,%ebp,1),%ecx @@ -133,7 +133,7 @@ L000start: roll $17,%ecx movl %edx,%edi addl %edx,%ecx - # R0 11 + # R0 11 xorl %eax,%edi andl %ecx,%edi leal 2304563134(%ebx,%ebp,1),%ebx @@ -143,7 +143,7 @@ L000start: roll $22,%ebx movl %ecx,%edi addl %ecx,%ebx - # R0 12 + # R0 12 xorl %edx,%edi andl %ebx,%edi leal 1804603682(%eax,%ebp,1),%eax @@ -153,7 +153,7 @@ L000start: roll $7,%eax movl %ebx,%edi addl %ebx,%eax - # R0 13 + # R0 13 xorl %ecx,%edi andl %eax,%edi leal 4254626195(%edx,%ebp,1),%edx @@ -163,7 +163,7 @@ L000start: roll $12,%edx movl %eax,%edi addl %eax,%edx - # R0 14 + # R0 14 xorl %ebx,%edi andl %edx,%edi leal 2792965006(%ecx,%ebp,1),%ecx @@ -173,7 +173,7 @@ L000start: roll $17,%ecx movl %edx,%edi addl %edx,%ecx - # R0 15 + # R0 15 xorl %eax,%edi andl %ecx,%edi leal 1236535329(%ebx,%ebp,1),%ebx @@ -184,8 +184,8 @@ L000start: movl %ecx,%edi addl %ecx,%ebx - # R1 section - # R1 16 + # R1 section + # R1 16 xorl %ebx,%edi andl %edx,%edi leal 4129170786(%eax,%ebp,1),%eax @@ -195,7 +195,7 @@ L000start: movl %ebx,%edi roll $5,%eax addl %ebx,%eax - # R1 17 + # R1 17 xorl %eax,%edi andl %ecx,%edi leal 3225465664(%edx,%ebp,1),%edx @@ -205,7 +205,7 @@ L000start: movl %eax,%edi roll $9,%edx addl %eax,%edx - # R1 18 + # R1 18 xorl %edx,%edi andl %ebx,%edi leal 643717713(%ecx,%ebp,1),%ecx @@ -215,7 +215,7 @@ L000start: movl %edx,%edi roll $14,%ecx addl %edx,%ecx - # R1 19 + # R1 19 xorl %ecx,%edi andl %eax,%edi leal 3921069994(%ebx,%ebp,1),%ebx @@ -225,7 +225,7 @@ L000start: movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx - # R1 20 + # R1 20 xorl %ebx,%edi andl %edx,%edi leal 3593408605(%eax,%ebp,1),%eax @@ -235,7 +235,7 @@ L000start: movl %ebx,%edi roll $5,%eax addl %ebx,%eax - # R1 21 + # R1 21 xorl 
%eax,%edi andl %ecx,%edi leal 38016083(%edx,%ebp,1),%edx @@ -245,7 +245,7 @@ L000start: movl %eax,%edi roll $9,%edx addl %eax,%edx - # R1 22 + # R1 22 xorl %edx,%edi andl %ebx,%edi leal 3634488961(%ecx,%ebp,1),%ecx @@ -255,7 +255,7 @@ L000start: movl %edx,%edi roll $14,%ecx addl %edx,%ecx - # R1 23 + # R1 23 xorl %ecx,%edi andl %eax,%edi leal 3889429448(%ebx,%ebp,1),%ebx @@ -265,7 +265,7 @@ L000start: movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx - # R1 24 + # R1 24 xorl %ebx,%edi andl %edx,%edi leal 568446438(%eax,%ebp,1),%eax @@ -275,7 +275,7 @@ L000start: movl %ebx,%edi roll $5,%eax addl %ebx,%eax - # R1 25 + # R1 25 xorl %eax,%edi andl %ecx,%edi leal 3275163606(%edx,%ebp,1),%edx @@ -285,7 +285,7 @@ L000start: movl %eax,%edi roll $9,%edx addl %eax,%edx - # R1 26 + # R1 26 xorl %edx,%edi andl %ebx,%edi leal 4107603335(%ecx,%ebp,1),%ecx @@ -295,7 +295,7 @@ L000start: movl %edx,%edi roll $14,%ecx addl %edx,%ecx - # R1 27 + # R1 27 xorl %ecx,%edi andl %eax,%edi leal 1163531501(%ebx,%ebp,1),%ebx @@ -305,7 +305,7 @@ L000start: movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx - # R1 28 + # R1 28 xorl %ebx,%edi andl %edx,%edi leal 2850285829(%eax,%ebp,1),%eax @@ -315,7 +315,7 @@ L000start: movl %ebx,%edi roll $5,%eax addl %ebx,%eax - # R1 29 + # R1 29 xorl %eax,%edi andl %ecx,%edi leal 4243563512(%edx,%ebp,1),%edx @@ -325,7 +325,7 @@ L000start: movl %eax,%edi roll $9,%edx addl %eax,%edx - # R1 30 + # R1 30 xorl %edx,%edi andl %ebx,%edi leal 1735328473(%ecx,%ebp,1),%ecx @@ -335,7 +335,7 @@ L000start: movl %edx,%edi roll $14,%ecx addl %edx,%ecx - # R1 31 + # R1 31 xorl %ecx,%edi andl %eax,%edi leal 2368359562(%ebx,%ebp,1),%ebx @@ -346,8 +346,8 @@ L000start: roll $20,%ebx addl %ecx,%ebx - # R2 section - # R2 32 + # R2 section + # R2 32 xorl %edx,%edi xorl %ebx,%edi leal 4294588738(%eax,%ebp,1),%eax @@ -355,7 +355,7 @@ L000start: movl 32(%esi),%ebp roll $4,%eax movl %ebx,%edi - # R2 33 + # R2 33 addl %ebx,%eax xorl %ecx,%edi leal 2272392833(%edx,%ebp,1),%edx @@ -365,7 +365,7 @@ L000start: movl %eax,%edi roll $11,%edx addl %eax,%edx - # R2 34 + # R2 34 xorl %ebx,%edi xorl %edx,%edi leal 1839030562(%ecx,%ebp,1),%ecx @@ -373,7 +373,7 @@ L000start: movl 56(%esi),%ebp roll $16,%ecx movl %edx,%edi - # R2 35 + # R2 35 addl %edx,%ecx xorl %eax,%edi leal 4259657740(%ebx,%ebp,1),%ebx @@ -383,7 +383,7 @@ L000start: movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx - # R2 36 + # R2 36 xorl %edx,%edi xorl %ebx,%edi leal 2763975236(%eax,%ebp,1),%eax @@ -391,7 +391,7 @@ L000start: movl 16(%esi),%ebp roll $4,%eax movl %ebx,%edi - # R2 37 + # R2 37 addl %ebx,%eax xorl %ecx,%edi leal 1272893353(%edx,%ebp,1),%edx @@ -401,7 +401,7 @@ L000start: movl %eax,%edi roll $11,%edx addl %eax,%edx - # R2 38 + # R2 38 xorl %ebx,%edi xorl %edx,%edi leal 4139469664(%ecx,%ebp,1),%ecx @@ -409,7 +409,7 @@ L000start: movl 40(%esi),%ebp roll $16,%ecx movl %edx,%edi - # R2 39 + # R2 39 addl %edx,%ecx xorl %eax,%edi leal 3200236656(%ebx,%ebp,1),%ebx @@ -419,7 +419,7 @@ L000start: movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx - # R2 40 + # R2 40 xorl %edx,%edi xorl %ebx,%edi leal 681279174(%eax,%ebp,1),%eax @@ -427,7 +427,7 @@ L000start: movl (%esi),%ebp roll $4,%eax movl %ebx,%edi - # R2 41 + # R2 41 addl %ebx,%eax xorl %ecx,%edi leal 3936430074(%edx,%ebp,1),%edx @@ -437,7 +437,7 @@ L000start: movl %eax,%edi roll $11,%edx addl %eax,%edx - # R2 42 + # R2 42 xorl %ebx,%edi xorl %edx,%edi leal 3572445317(%ecx,%ebp,1),%ecx @@ -445,7 +445,7 @@ L000start: movl 24(%esi),%ebp roll $16,%ecx movl %edx,%edi - # R2 43 + # R2 43 addl %edx,%ecx xorl %eax,%edi leal 
76029189(%ebx,%ebp,1),%ebx @@ -455,7 +455,7 @@ L000start: movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx - # R2 44 + # R2 44 xorl %edx,%edi xorl %ebx,%edi leal 3654602809(%eax,%ebp,1),%eax @@ -463,7 +463,7 @@ L000start: movl 48(%esi),%ebp roll $4,%eax movl %ebx,%edi - # R2 45 + # R2 45 addl %ebx,%eax xorl %ecx,%edi leal 3873151461(%edx,%ebp,1),%edx @@ -473,7 +473,7 @@ L000start: movl %eax,%edi roll $11,%edx addl %eax,%edx - # R2 46 + # R2 46 xorl %ebx,%edi xorl %edx,%edi leal 530742520(%ecx,%ebp,1),%ecx @@ -481,7 +481,7 @@ L000start: movl 8(%esi),%ebp roll $16,%ecx movl %edx,%edi - # R2 47 + # R2 47 addl %edx,%ecx xorl %eax,%edi leal 3299628645(%ebx,%ebp,1),%ebx @@ -492,8 +492,8 @@ L000start: roll $23,%ebx addl %ecx,%ebx - # R3 section - # R3 48 + # R3 section + # R3 48 xorl %edx,%edi orl %ebx,%edi leal 4096336452(%eax,%ebp,1),%eax @@ -504,7 +504,7 @@ L000start: roll $6,%eax xorl %ecx,%edi addl %ebx,%eax - # R3 49 + # R3 49 orl %eax,%edi leal 1126891415(%edx,%ebp,1),%edx xorl %ebx,%edi @@ -514,7 +514,7 @@ L000start: roll $10,%edx xorl %ebx,%edi addl %eax,%edx - # R3 50 + # R3 50 orl %edx,%edi leal 2878612391(%ecx,%ebp,1),%ecx xorl %eax,%edi @@ -524,7 +524,7 @@ L000start: roll $15,%ecx xorl %eax,%edi addl %edx,%ecx - # R3 51 + # R3 51 orl %ecx,%edi leal 4237533241(%ebx,%ebp,1),%ebx xorl %edx,%edi @@ -534,7 +534,7 @@ L000start: roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx - # R3 52 + # R3 52 orl %ebx,%edi leal 1700485571(%eax,%ebp,1),%eax xorl %ecx,%edi @@ -544,7 +544,7 @@ L000start: roll $6,%eax xorl %ecx,%edi addl %ebx,%eax - # R3 53 + # R3 53 orl %eax,%edi leal 2399980690(%edx,%ebp,1),%edx xorl %ebx,%edi @@ -554,7 +554,7 @@ L000start: roll $10,%edx xorl %ebx,%edi addl %eax,%edx - # R3 54 + # R3 54 orl %edx,%edi leal 4293915773(%ecx,%ebp,1),%ecx xorl %eax,%edi @@ -564,7 +564,7 @@ L000start: roll $15,%ecx xorl %eax,%edi addl %edx,%ecx - # R3 55 + # R3 55 orl %ecx,%edi leal 2240044497(%ebx,%ebp,1),%ebx xorl %edx,%edi @@ -574,7 +574,7 @@ L000start: roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx - # R3 56 + # R3 56 orl %ebx,%edi leal 1873313359(%eax,%ebp,1),%eax xorl %ecx,%edi @@ -584,7 +584,7 @@ L000start: roll $6,%eax xorl %ecx,%edi addl %ebx,%eax - # R3 57 + # R3 57 orl %eax,%edi leal 4264355552(%edx,%ebp,1),%edx xorl %ebx,%edi @@ -594,7 +594,7 @@ L000start: roll $10,%edx xorl %ebx,%edi addl %eax,%edx - # R3 58 + # R3 58 orl %edx,%edi leal 2734768916(%ecx,%ebp,1),%ecx xorl %eax,%edi @@ -604,7 +604,7 @@ L000start: roll $15,%ecx xorl %eax,%edi addl %edx,%ecx - # R3 59 + # R3 59 orl %ecx,%edi leal 1309151649(%ebx,%ebp,1),%ebx xorl %edx,%edi @@ -614,7 +614,7 @@ L000start: roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx - # R3 60 + # R3 60 orl %ebx,%edi leal 4149444226(%eax,%ebp,1),%eax xorl %ecx,%edi @@ -624,7 +624,7 @@ L000start: roll $6,%eax xorl %ecx,%edi addl %ebx,%eax - # R3 61 + # R3 61 orl %eax,%edi leal 3174756917(%edx,%ebp,1),%edx xorl %ebx,%edi @@ -634,7 +634,7 @@ L000start: roll $10,%edx xorl %ebx,%edi addl %eax,%edx - # R3 62 + # R3 62 orl %edx,%edi leal 718787259(%ecx,%ebp,1),%ecx xorl %eax,%edi @@ -644,7 +644,7 @@ L000start: roll $15,%ecx xorl %eax,%edi addl %edx,%ecx - # R3 63 + # R3 63 orl %ecx,%edi leal 3951481745(%ebx,%ebp,1),%ebx xorl %edx,%edi diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s index 01d89630a42f73..bf508aff6ff6ec 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s +++ 
b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s @@ -3,7 +3,7 @@ .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: -.cfi_startproc +.cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 @@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x: vpxor %xmm4,%xmm8,%xmm8 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .type aesni_gcm_decrypt,@function @@ -418,7 +418,7 @@ aesni_gcm_decrypt: .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: -.cfi_startproc +.cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%rbp),%r13 @@ -505,7 +505,7 @@ _aesni_ctr32_6x: vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/poly1305/poly1305-x86_64.s index 987a65aab38147..9bb9be4632f53c 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/poly1305/poly1305-x86_64.s @@ -12,7 +12,7 @@ .type poly1305_init,@function .align 32 poly1305_init: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -48,7 +48,7 @@ poly1305_init: movl $1,%eax .Lno_key: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init,.-poly1305_init .type poly1305_blocks,@function @@ -169,7 +169,7 @@ poly1305_blocks: .type poly1305_emit,@function .align 32 poly1305_emit: -.cfi_startproc +.cfi_startproc .Lemit: movq 0(%rdi),%r8 movq 8(%rdi),%r9 @@ -190,12 +190,12 @@ poly1305_emit: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit,.-poly1305_emit .type __poly1305_block,@function .align 32 __poly1305_block: -.cfi_startproc +.cfi_startproc mulq %r14 movq %rax,%r9 movq %r11,%rax @@ -235,13 +235,13 @@ __poly1305_block: adcq $0,%rbx adcq $0,%rbp .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_block,.-__poly1305_block .type __poly1305_init_avx,@function .align 32 __poly1305_init_avx: -.cfi_startproc +.cfi_startproc movq %r11,%r14 movq %r12,%rbx xorq %rbp,%rbp @@ -399,7 +399,7 @@ __poly1305_init_avx: leaq -48-64(%rdi),%rdi .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_init_avx,.-__poly1305_init_avx .type poly1305_blocks_avx,@function @@ -1240,7 +1240,7 @@ poly1305_blocks_avx: .type poly1305_emit_avx,@function .align 32 poly1305_emit_avx: -.cfi_startproc +.cfi_startproc cmpl $0,20(%rdi) je .Lemit @@ -1291,7 +1291,7 @@ poly1305_emit_avx: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_avx,.-poly1305_emit_avx .type poly1305_blocks_avx2,@function .align 32 @@ -2488,7 +2488,7 @@ poly1305_blocks_avx512: .type poly1305_init_base2_44,@function .align 32 poly1305_init_base2_44: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -2522,12 +2522,12 @@ poly1305_init_base2_44: movq %r11,8(%rdx) movl $1,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init_base2_44,.-poly1305_init_base2_44 .type poly1305_blocks_vpmadd52,@function .align 32 poly1305_blocks_vpmadd52: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52 @@ -2634,12 +2634,12 @@ poly1305_blocks_vpmadd52: .Lno_data_vpmadd52: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 .type 
poly1305_blocks_vpmadd52_4x,@function .align 32 poly1305_blocks_vpmadd52_4x: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52_4x @@ -3064,12 +3064,12 @@ poly1305_blocks_vpmadd52_4x: .Lno_data_vpmadd52_4x: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x .type poly1305_blocks_vpmadd52_8x,@function .align 32 poly1305_blocks_vpmadd52_8x: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52_8x @@ -3410,12 +3410,12 @@ poly1305_blocks_vpmadd52_8x: .Lno_data_vpmadd52_8x: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x .type poly1305_emit_base2_44,@function .align 32 poly1305_emit_base2_44: -.cfi_startproc +.cfi_startproc movq 0(%rdi),%r8 movq 8(%rdi),%r9 movq 16(%rdi),%r10 @@ -3446,7 +3446,7 @@ poly1305_emit_base2_44: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_base2_44,.-poly1305_emit_base2_44 .align 64 .Lconst: @@ -3485,7 +3485,7 @@ poly1305_emit_base2_44: .type xor128_encrypt_n_pad,@function .align 16 xor128_encrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -3527,14 +3527,14 @@ xor128_encrypt_n_pad: .Ldone_enc: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad .globl xor128_decrypt_n_pad .type xor128_decrypt_n_pad,@function .align 16 xor128_decrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -3580,5 +3580,5 @@ xor128_decrypt_n_pad: .Ldone_dec: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/rc4/rc4-x86_64.s index b97c757550aad0..d1d1eece70bf1e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/rc4/rc4-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/rc4/rc4-x86_64.s @@ -5,7 +5,7 @@ .type RC4,@function .align 16 RC4: -.cfi_startproc +.cfi_startproc orq %rsi,%rsi jne .Lentry .byte 0xf3,0xc3 @@ -534,7 +534,7 @@ RC4: .type RC4_set_key,@function .align 16 RC4_set_key: -.cfi_startproc +.cfi_startproc leaq 8(%rdi),%rdi leaq (%rdx,%rsi,1),%rdx negq %rsi @@ -601,14 +601,14 @@ RC4_set_key: movl %eax,-8(%rdi) movl %eax,-4(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size RC4_set_key,.-RC4_set_key .globl RC4_options .type RC4_options,@function .align 16 RC4_options: -.cfi_startproc +.cfi_startproc leaq .Lopts(%rip),%rax movl OPENSSL_ia32cap_P(%rip),%edx btl $20,%edx @@ -621,7 +621,7 @@ RC4_options: addq $12,%rax .Ldone: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .align 64 .Lopts: .byte 114,99,52,40,56,120,44,105,110,116,41,0 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/keccak1600-x86_64.s index 09617d014bdb7b..11f26e933d80b5 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/keccak1600-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/keccak1600-x86_64.s @@ -3,7 +3,7 @@ .type __KeccakF1600,@function .align 32 __KeccakF1600: -.cfi_startproc +.cfi_startproc movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx @@ -256,7 +256,7 @@ __KeccakF1600: leaq -192(%r15),%r15 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .type KeccakF1600,@function diff --git 
a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha1-x86_64.s index 98541727e555da..d4efc7206f57b3 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha1-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha1-x86_64.s @@ -1422,7 +1422,7 @@ _shaext_shortcut: movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext .type sha1_block_data_order_ssse3,@function .align 16 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha256-x86_64.s index 9357385da3c49b..a7b60900fdd061 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha256-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/sha/sha256-x86_64.s @@ -1775,7 +1775,7 @@ K256: .align 64 sha256_block_data_order_shaext: _shaext_shortcut: -.cfi_startproc +.cfi_startproc leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 @@ -1978,7 +1978,7 @@ _shaext_shortcut: movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext .type sha256_block_data_order_ssse3,@function .align 64 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/x86_64cpuid.s b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/x86_64cpuid.s index 9268ce8c9a9d63..748e6d161fa24a 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm/crypto/x86_64cpuid.s @@ -12,7 +12,7 @@ .type OPENSSL_atomic_add,@function .align 16 OPENSSL_atomic_add: -.cfi_startproc +.cfi_startproc movl (%rdi),%eax .Lspin: leaq (%rsi,%rax,1),%r8 .byte 0xf0 @@ -21,19 +21,19 @@ OPENSSL_atomic_add: movl %r8d,%eax .byte 0x48,0x98 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_atomic_add,.-OPENSSL_atomic_add .globl OPENSSL_rdtsc .type OPENSSL_rdtsc,@function .align 16 OPENSSL_rdtsc: -.cfi_startproc +.cfi_startproc rdtsc shlq $32,%rdx orq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_rdtsc,.-OPENSSL_rdtsc .globl OPENSSL_ia32_cpuid @@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid: .type OPENSSL_cleanse,@function .align 16 OPENSSL_cleanse: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $15,%rsi jae .Lot @@ -239,14 +239,14 @@ OPENSSL_cleanse: cmpq $0,%rsi jne .Little .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_cleanse,.-OPENSSL_cleanse .globl CRYPTO_memcmp .type CRYPTO_memcmp,@function .align 16 CRYPTO_memcmp: -.cfi_startproc +.cfi_startproc xorq %rax,%rax xorq %r10,%r10 cmpq $0,%rdx @@ -275,13 +275,13 @@ CRYPTO_memcmp: shrq $63,%rax .Lno_data: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size CRYPTO_memcmp,.-CRYPTO_memcmp .globl OPENSSL_wipe_cpu .type OPENSSL_wipe_cpu,@function .align 16 OPENSSL_wipe_cpu: -.cfi_startproc +.cfi_startproc pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 @@ -308,13 +308,13 @@ OPENSSL_wipe_cpu: xorq %r11,%r11 leaq 8(%rsp),%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu .globl OPENSSL_instrument_bus .type OPENSSL_instrument_bus,@function .align 16 OPENSSL_instrument_bus: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rsi,%r11 @@ -341,14 +341,14 @@ OPENSSL_instrument_bus: movq %r11,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus .globl 
OPENSSL_instrument_bus2 .type OPENSSL_instrument_bus2,@function .align 16 OPENSSL_instrument_bus2: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rdx,%r11 @@ -391,13 +391,13 @@ OPENSSL_instrument_bus2: movq 8(%rsp),%rax subq %rcx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2 .globl OPENSSL_ia32_rdrand_bytes .type OPENSSL_ia32_rdrand_bytes,@function .align 16 OPENSSL_ia32_rdrand_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdrand_bytes @@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes: .Ldone_rdrand_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes .globl OPENSSL_ia32_rdseed_bytes .type OPENSSL_ia32_rdseed_bytes,@function .align 16 OPENSSL_ia32_rdseed_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdseed_bytes @@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes: .Ldone_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/configdata.pm b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/configdata.pm index 70e7af52d5196c..a9a502028e7bdb 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "BSD-x86_64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s index 978bd2b6239c15..a38e21f0484e24 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha1_enc,@function .align 32 aesni_cbc_sha1_enc: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+0(%rip),%r10d movq OPENSSL_ia32cap_P+4(%rip),%r11 @@ -18,7 +18,7 @@ aesni_cbc_sha1_enc: je aesni_cbc_sha1_enc_avx jmp aesni_cbc_sha1_enc_ssse3 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc .type aesni_cbc_sha1_enc_ssse3,@function .align 32 @@ -2732,7 +2732,7 @@ K_XX_XX: .type aesni_cbc_sha1_enc_shaext,@function .align 32 aesni_cbc_sha1_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 movdqu (%r9),%xmm8 movd 16(%r9),%xmm9 @@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext: movdqu %xmm8,(%r9) movd %xmm9,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s index dd09f1b290af62..3e56a82578a354 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha256_enc,@function .align 16 aesni_cbc_sha256_enc: -.cfi_startproc +.cfi_startproc leaq OPENSSL_ia32cap_P(%rip),%r11 movl $1,%eax cmpq $0,%rdi @@ -31,7 +31,7 @@ aesni_cbc_sha256_enc: ud2 .Lprobe: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size 
aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc .align 64 @@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2: .type aesni_cbc_sha256_enc_shaext,@function .align 32 aesni_cbc_sha256_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 leaq K256+128(%rip),%rax movdqu (%r9),%xmm1 @@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext: movdqu %xmm1,(%r9) movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s index c1e791eff59235..1a4b22e7b8a5d9 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s @@ -861,7 +861,7 @@ aesni_ecb_encrypt: .type aesni_ccm64_encrypt_blocks,@function .align 16 aesni_ccm64_encrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movdqu (%r8),%xmm6 movdqa .Lincrement64(%rip),%xmm9 @@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks .globl aesni_ccm64_decrypt_blocks .type aesni_ccm64_decrypt_blocks,@function .align 16 aesni_ccm64_decrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movups (%r8),%xmm6 movdqu (%r9),%xmm3 @@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks .globl aesni_ctr32_encrypt_blocks .type aesni_ctr32_encrypt_blocks,@function @@ -2794,7 +2794,7 @@ aesni_ocb_encrypt: .type __ocb_encrypt6,@function .align 32 __ocb_encrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2892,13 +2892,13 @@ __ocb_encrypt6: .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt6,.-__ocb_encrypt6 .type __ocb_encrypt4,@function .align 32 __ocb_encrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2963,13 +2963,13 @@ __ocb_encrypt4: .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt4,.-__ocb_encrypt4 .type __ocb_encrypt1,@function .align 32 __ocb_encrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm2,%xmm8 @@ -3000,7 +3000,7 @@ __ocb_encrypt1: .byte 102,15,56,221,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt1,.-__ocb_encrypt1 .globl aesni_ocb_decrypt @@ -3243,7 +3243,7 @@ aesni_ocb_decrypt: .type __ocb_decrypt6,@function .align 32 __ocb_decrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3335,13 +3335,13 @@ __ocb_decrypt6: .byte 102,65,15,56,223,246 .byte 102,65,15,56,223,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt6,.-__ocb_decrypt6 .type __ocb_decrypt4,@function .align 32 __ocb_decrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3402,13 +3402,13 @@ __ocb_decrypt4: .byte 102,65,15,56,223,228 .byte 102,65,15,56,223,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt4,.-__ocb_decrypt4 .type __ocb_decrypt1,@function .align 32 __ocb_decrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 
pxor %xmm9,%xmm7 pxor %xmm7,%xmm2 @@ -3438,7 +3438,7 @@ __ocb_decrypt1: .byte 102,15,56,223,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt1,.-__ocb_decrypt1 .globl aesni_cbc_encrypt .type aesni_cbc_encrypt,@function @@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_set_encrypt_key,.-aesni_set_encrypt_key .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key .align 64 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s index 7876e0b8f93d9c..d5025b23cd668e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s @@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one: .type __rsaz_512_reduce,@function .align 32 __rsaz_512_reduce: -.cfi_startproc +.cfi_startproc movq %r8,%rbx imulq 128+8(%rsp),%rbx movq 0(%rbp),%rax @@ -1533,12 +1533,12 @@ __rsaz_512_reduce: jne .Lreduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reduce,.-__rsaz_512_reduce .type __rsaz_512_reducex,@function .align 32 __rsaz_512_reducex: -.cfi_startproc +.cfi_startproc imulq %r8,%rdx xorq %rsi,%rsi @@ -1591,12 +1591,12 @@ __rsaz_512_reducex: jne .Lreduction_loopx .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reducex,.-__rsaz_512_reducex .type __rsaz_512_subtract,@function .align 32 __rsaz_512_subtract: -.cfi_startproc +.cfi_startproc movq %r8,(%rdi) movq %r9,8(%rdi) movq %r10,16(%rdi) @@ -1650,12 +1650,12 @@ __rsaz_512_subtract: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_subtract,.-__rsaz_512_subtract .type __rsaz_512_mul,@function .align 32 __rsaz_512_mul: -.cfi_startproc +.cfi_startproc leaq 8(%rsp),%rdi movq (%rsi),%rax @@ -1794,12 +1794,12 @@ __rsaz_512_mul: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mul,.-__rsaz_512_mul .type __rsaz_512_mulx,@function .align 32 __rsaz_512_mulx: -.cfi_startproc +.cfi_startproc mulxq (%rsi),%rbx,%r8 movq $-6,%rcx @@ -1916,13 +1916,13 @@ __rsaz_512_mulx: movq %r15,8+64+56(%rsp) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mulx,.-__rsaz_512_mulx .globl rsaz_512_scatter4 .type rsaz_512_scatter4,@function .align 16 rsaz_512_scatter4: -.cfi_startproc +.cfi_startproc leaq (%rdi,%rdx,8),%rdi movl $8,%r9d jmp .Loop_scatter @@ -1935,14 +1935,14 @@ rsaz_512_scatter4: decl %r9d jnz .Loop_scatter .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size rsaz_512_scatter4,.-rsaz_512_scatter4 .globl rsaz_512_gather4 .type rsaz_512_gather4,@function .align 16 rsaz_512_gather4: -.cfi_startproc +.cfi_startproc movd %edx,%xmm8 movdqa .Linc+16(%rip),%xmm1 movdqa .Linc(%rip),%xmm0 @@ -2006,7 +2006,7 @@ rsaz_512_gather4: jnz .Loop_gather .byte 0xf3,0xc3 .LSEH_end_rsaz_512_gather4: -.cfi_endproc +.cfi_endproc .size rsaz_512_gather4,.-rsaz_512_gather4 .align 64 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s index 40a60a3c8fc6b9..ab93b02d8c1aae 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s @@ -550,7 +550,7 @@ bn_mul4x_mont_gather5: .type mul4x_internal,@function .align 32 mul4x_internal: -.cfi_startproc +.cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq 
.Linc(%rip),%rax @@ -1072,7 +1072,7 @@ mul4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry -.cfi_endproc +.cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5 .type bn_power5,@function @@ -1215,7 +1215,7 @@ bn_power5: .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: -.cfi_startproc +.cfi_startproc @@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction: cmpq %rdx,%rdi jb .L8x_reduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_sqr8x_internal,.-bn_sqr8x_internal .type __bn_post4x_internal,@function .align 32 __bn_post4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx @@ -2046,18 +2046,18 @@ __bn_post4x_internal: movq %r9,%r10 negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_from_montgomery .type bn_from_montgomery,@function .align 32 bn_from_montgomery: -.cfi_startproc +.cfi_startproc testl $7,%r9d jz bn_from_mont8x xorl %eax,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_from_montgomery,.-bn_from_montgomery .type bn_from_mont8x,@function @@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5: .type mulx4x_internal,@function .align 32 mulx4x_internal: -.cfi_startproc +.cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 @@ -2760,7 +2760,7 @@ mulx4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry -.cfi_endproc +.cfi_endproc .size mulx4x_internal,.-mulx4x_internal .type bn_powerx5,@function .align 32 @@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction: .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 __bn_postx4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 @@ -3567,13 +3567,13 @@ __bn_postx4x_internal: negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_get_bits5 .type bn_get_bits5,@function .align 16 bn_get_bits5: -.cfi_startproc +.cfi_startproc leaq 0(%rdi),%r10 leaq 1(%rdi),%r11 movl %esi,%ecx @@ -3587,14 +3587,14 @@ bn_get_bits5: shrl %cl,%eax andl $31,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_get_bits5,.-bn_get_bits5 .globl bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: -.cfi_startproc +.cfi_startproc cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx @@ -3607,7 +3607,7 @@ bn_scatter5: jnz .Lscatter .Lscatter_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 @@ -3615,7 +3615,7 @@ bn_scatter5: .align 32 bn_gather5: .LSEH_begin_bn_gather5: -.cfi_startproc +.cfi_startproc .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 @@ -3773,7 +3773,7 @@ bn_gather5: leaq (%r10),%rsp .byte 0xf3,0xc3 .LSEH_end_bn_gather5: -.cfi_endproc +.cfi_endproc .size bn_gather5,.-bn_gather5 .align 64 .Linc: diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/buildinf.h index caf96d4119f12b..764dd70dff0289 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: BSD-x86_64" -#define DATE "built on: Mon Mar 23 17:50:31 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:19 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s index eeb20dd2291da7..92056f8b1e0e0e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s @@ -5,13 +5,13 @@ .type Camellia_EncryptBlock,@function .align 16 Camellia_EncryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Lenc_rounds -.cfi_endproc +.cfi_endproc .size Camellia_EncryptBlock,.-Camellia_EncryptBlock .globl Camellia_EncryptBlock_Rounds @@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds: .type _x86_64_Camellia_encrypt,@function .align 16 _x86_64_Camellia_encrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt: movl %edx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt @@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt: .type Camellia_DecryptBlock,@function .align 16 Camellia_DecryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Ldec_rounds -.cfi_endproc +.cfi_endproc .size Camellia_DecryptBlock,.-Camellia_DecryptBlock .globl Camellia_DecryptBlock_Rounds @@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds: .type _x86_64_Camellia_decrypt,@function .align 16 _x86_64_Camellia_decrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt: movl %ebx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt .globl Camellia_Ekeygen .type Camellia_Ekeygen,@function diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s index 5c9e4050416212..80569cae04667e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s @@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx: .type ecp_nistz256_to_mont,@function .align 32 ecp_nistz256_to_mont: -.cfi_startproc +.cfi_startproc movl $0x80100,%ecx andl OPENSSL_ia32cap_P+8(%rip),%ecx leaq .LRR(%rip),%rdx jmp .Lmul_mont -.cfi_endproc +.cfi_endproc .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont @@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont: .type ecp_nistz256_scatter_w5,@function .align 32 ecp_nistz256_scatter_w5: -.cfi_startproc +.cfi_startproc leal -3(%rdx,%rdx,2),%edx movdqa 0(%rsi),%xmm0 shll $5,%edx @@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5: movdqa %xmm5,80(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5 @@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5: .type ecp_nistz256_scatter_w7,@function .align 32 ecp_nistz256_scatter_w7: -.cfi_startproc +.cfi_startproc movdqu 0(%rsi),%xmm0 shll $6,%edx movdqu 16(%rsi),%xmm1 @@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7: movdqa %xmm3,48(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s index 1788e568cda5d2..8fd319c83c880d 100644 --- 
a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s @@ -400,14 +400,14 @@ x25519_fe51_mul121666: .type x25519_fe64_eligible,@function .align 32 x25519_fe64_eligible: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+8(%rip),%ecx xorl %eax,%eax andl $0x80100,%ecx cmpl $0x80100,%ecx cmovel %ecx,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_eligible,.-x25519_fe64_eligible .globl x25519_fe64_mul @@ -650,7 +650,7 @@ x25519_fe64_sqr: .align 32 x25519_fe64_mul121666: .Lfe64_mul121666_body: -.cfi_startproc +.cfi_startproc movl $121666,%edx mulxq 0(%rsi),%r8,%rcx mulxq 8(%rsi),%r9,%rax @@ -679,7 +679,7 @@ x25519_fe64_mul121666: .Lfe64_mul121666_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_mul121666,.-x25519_fe64_mul121666 .globl x25519_fe64_add @@ -687,7 +687,7 @@ x25519_fe64_mul121666: .align 32 x25519_fe64_add: .Lfe64_add_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -716,7 +716,7 @@ x25519_fe64_add: .Lfe64_add_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_add,.-x25519_fe64_add .globl x25519_fe64_sub @@ -724,7 +724,7 @@ x25519_fe64_add: .align 32 x25519_fe64_sub: .Lfe64_sub_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -753,7 +753,7 @@ x25519_fe64_sub: .Lfe64_sub_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_sub,.-x25519_fe64_sub .globl x25519_fe64_tobytes @@ -761,7 +761,7 @@ x25519_fe64_sub: .align 32 x25519_fe64_tobytes: .Lfe64_to_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -797,6 +797,6 @@ x25519_fe64_tobytes: .Lfe64_to_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_tobytes,.-x25519_fe64_tobytes .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s index 01d89630a42f73..bf508aff6ff6ec 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s @@ -3,7 +3,7 @@ .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: -.cfi_startproc +.cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 @@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x: vpxor %xmm4,%xmm8,%xmm8 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .type aesni_gcm_decrypt,@function @@ -418,7 +418,7 @@ aesni_gcm_decrypt: .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: -.cfi_startproc +.cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%rbp),%r13 @@ -505,7 +505,7 @@ _aesni_ctr32_6x: vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s index 2636c52bbe5e48..8f2554e047f1bf 100644 --- 
a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s @@ -12,7 +12,7 @@ .type poly1305_init,@function .align 32 poly1305_init: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -43,7 +43,7 @@ poly1305_init: movl $1,%eax .Lno_key: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init,.-poly1305_init .type poly1305_blocks,@function @@ -164,7 +164,7 @@ poly1305_blocks: .type poly1305_emit,@function .align 32 poly1305_emit: -.cfi_startproc +.cfi_startproc .Lemit: movq 0(%rdi),%r8 movq 8(%rdi),%r9 @@ -185,12 +185,12 @@ poly1305_emit: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit,.-poly1305_emit .type __poly1305_block,@function .align 32 __poly1305_block: -.cfi_startproc +.cfi_startproc mulq %r14 movq %rax,%r9 movq %r11,%rax @@ -230,13 +230,13 @@ __poly1305_block: adcq $0,%rbx adcq $0,%rbp .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_block,.-__poly1305_block .type __poly1305_init_avx,@function .align 32 __poly1305_init_avx: -.cfi_startproc +.cfi_startproc movq %r11,%r14 movq %r12,%rbx xorq %rbp,%rbp @@ -394,7 +394,7 @@ __poly1305_init_avx: leaq -48-64(%rdi),%rdi .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_init_avx,.-__poly1305_init_avx .type poly1305_blocks_avx,@function @@ -1235,7 +1235,7 @@ poly1305_blocks_avx: .type poly1305_emit_avx,@function .align 32 poly1305_emit_avx: -.cfi_startproc +.cfi_startproc cmpl $0,20(%rdi) je .Lemit @@ -1286,7 +1286,7 @@ poly1305_emit_avx: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_avx,.-poly1305_emit_avx .type poly1305_blocks_avx2,@function .align 32 @@ -1969,7 +1969,7 @@ poly1305_blocks_avx2: .type xor128_encrypt_n_pad,@function .align 16 xor128_encrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -2011,14 +2011,14 @@ xor128_encrypt_n_pad: .Ldone_enc: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad .globl xor128_decrypt_n_pad .type xor128_decrypt_n_pad,@function .align 16 xor128_decrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -2064,5 +2064,5 @@ xor128_decrypt_n_pad: .Ldone_dec: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s index b97c757550aad0..d1d1eece70bf1e 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s @@ -5,7 +5,7 @@ .type RC4,@function .align 16 RC4: -.cfi_startproc +.cfi_startproc orq %rsi,%rsi jne .Lentry .byte 0xf3,0xc3 @@ -534,7 +534,7 @@ RC4: .type RC4_set_key,@function .align 16 RC4_set_key: -.cfi_startproc +.cfi_startproc leaq 8(%rdi),%rdi leaq (%rdx,%rsi,1),%rdx negq %rsi @@ -601,14 +601,14 @@ RC4_set_key: movl %eax,-8(%rdi) movl %eax,-4(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size RC4_set_key,.-RC4_set_key .globl RC4_options .type RC4_options,@function .align 16 RC4_options: -.cfi_startproc +.cfi_startproc leaq .Lopts(%rip),%rax movl OPENSSL_ia32cap_P(%rip),%edx btl $20,%edx @@ -621,7 +621,7 @@ RC4_options: addq $12,%rax .Ldone: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .align 64 .Lopts: .byte 
114,99,52,40,56,120,44,105,110,116,41,0 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s index 09617d014bdb7b..11f26e933d80b5 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s @@ -3,7 +3,7 @@ .type __KeccakF1600,@function .align 32 __KeccakF1600: -.cfi_startproc +.cfi_startproc movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx @@ -256,7 +256,7 @@ __KeccakF1600: leaq -192(%r15),%r15 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .type KeccakF1600,@function diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s index 98541727e555da..d4efc7206f57b3 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s @@ -1422,7 +1422,7 @@ _shaext_shortcut: movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext .type sha1_block_data_order_ssse3,@function .align 16 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s index 9357385da3c49b..a7b60900fdd061 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s @@ -1775,7 +1775,7 @@ K256: .align 64 sha256_block_data_order_shaext: _shaext_shortcut: -.cfi_startproc +.cfi_startproc leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 @@ -1978,7 +1978,7 @@ _shaext_shortcut: movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext .type sha256_block_data_order_ssse3,@function .align 64 diff --git a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/x86_64cpuid.s b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/x86_64cpuid.s index 9268ce8c9a9d63..748e6d161fa24a 100644 --- a/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/BSD-x86_64/asm_avx2/crypto/x86_64cpuid.s @@ -12,7 +12,7 @@ .type OPENSSL_atomic_add,@function .align 16 OPENSSL_atomic_add: -.cfi_startproc +.cfi_startproc movl (%rdi),%eax .Lspin: leaq (%rsi,%rax,1),%r8 .byte 0xf0 @@ -21,19 +21,19 @@ OPENSSL_atomic_add: movl %r8d,%eax .byte 0x48,0x98 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_atomic_add,.-OPENSSL_atomic_add .globl OPENSSL_rdtsc .type OPENSSL_rdtsc,@function .align 16 OPENSSL_rdtsc: -.cfi_startproc +.cfi_startproc rdtsc shlq $32,%rdx orq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_rdtsc,.-OPENSSL_rdtsc .globl OPENSSL_ia32_cpuid @@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid: .type OPENSSL_cleanse,@function .align 16 OPENSSL_cleanse: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $15,%rsi jae .Lot @@ -239,14 +239,14 @@ OPENSSL_cleanse: cmpq $0,%rsi jne .Little .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_cleanse,.-OPENSSL_cleanse .globl CRYPTO_memcmp .type CRYPTO_memcmp,@function .align 16 CRYPTO_memcmp: -.cfi_startproc +.cfi_startproc xorq %rax,%rax xorq %r10,%r10 cmpq $0,%rdx @@ -275,13 +275,13 
@@ CRYPTO_memcmp: shrq $63,%rax .Lno_data: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size CRYPTO_memcmp,.-CRYPTO_memcmp .globl OPENSSL_wipe_cpu .type OPENSSL_wipe_cpu,@function .align 16 OPENSSL_wipe_cpu: -.cfi_startproc +.cfi_startproc pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 @@ -308,13 +308,13 @@ OPENSSL_wipe_cpu: xorq %r11,%r11 leaq 8(%rsp),%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu .globl OPENSSL_instrument_bus .type OPENSSL_instrument_bus,@function .align 16 OPENSSL_instrument_bus: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rsi,%r11 @@ -341,14 +341,14 @@ OPENSSL_instrument_bus: movq %r11,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus .globl OPENSSL_instrument_bus2 .type OPENSSL_instrument_bus2,@function .align 16 OPENSSL_instrument_bus2: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rdx,%r11 @@ -391,13 +391,13 @@ OPENSSL_instrument_bus2: movq 8(%rsp),%rax subq %rcx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2 .globl OPENSSL_ia32_rdrand_bytes .type OPENSSL_ia32_rdrand_bytes,@function .align 16 OPENSSL_ia32_rdrand_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdrand_bytes @@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes: .Ldone_rdrand_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes .globl OPENSSL_ia32_rdseed_bytes .type OPENSSL_ia32_rdseed_bytes,@function .align 16 OPENSSL_ia32_rdseed_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdseed_bytes @@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes: .Ldone_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes diff --git a/deps/openssl/config/archs/BSD-x86_64/no-asm/configdata.pm b/deps/openssl/config/archs/BSD-x86_64/no-asm/configdata.pm index bd65c6cbf049c3..24b990453eadff 100644 --- a/deps/openssl/config/archs/BSD-x86_64/no-asm/configdata.pm +++ b/deps/openssl/config/archs/BSD-x86_64/no-asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "BSD-x86_64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/BSD-x86_64/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/BSD-x86_64/no-asm/crypto/buildinf.h index 8cfe8fd5340418..3071e7f2c53561 100644 --- a/deps/openssl/config/archs/BSD-x86_64/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/BSD-x86_64/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: BSD-x86_64" -#define DATE "built on: Mon Mar 23 17:50:55 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:28 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
diff --git a/deps/openssl/config/archs/VC-WIN32/asm/configdata.pm b/deps/openssl/config/archs/VC-WIN32/asm/configdata.pm index 7941932e4c6090..a98fd3c5150cf3 100644 --- a/deps/openssl/config/archs/VC-WIN32/asm/configdata.pm +++ b/deps/openssl/config/archs/VC-WIN32/asm/configdata.pm @@ -115,8 +115,8 @@ our %config = ( sourcedir => ".", target => "VC-WIN32", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -132,7 +132,7 @@ our %target = ( LDFLAGS => "/nologo /debug", MT => "mt", MTFLAGS => "-nologo", - RANLIB => "CODE(0x556a7eaf0b80)", + RANLIB => "CODE(0x557db897a670)", RC => "rc", _conf_fname_int => [ "Configurations/00-base-templates.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/shared-info.pl" ], aes_asm_src => "aes_core.c aes_cbc.c vpaes-x86.s aesni-x86.s", diff --git a/deps/openssl/config/archs/VC-WIN32/asm/crypto/buildinf.h b/deps/openssl/config/archs/VC-WIN32/asm/crypto/buildinf.h index 578961f91e5ace..aceb03e93ec96d 100644 --- a/deps/openssl/config/archs/VC-WIN32/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/VC-WIN32/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: " -#define DATE "built on: Mon Mar 23 18:01:35 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:45 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/VC-WIN32/asm_avx2/configdata.pm b/deps/openssl/config/archs/VC-WIN32/asm_avx2/configdata.pm index 830dd5f6b42dfe..4eb51029056db4 100644 --- a/deps/openssl/config/archs/VC-WIN32/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/VC-WIN32/asm_avx2/configdata.pm @@ -115,8 +115,8 @@ our %config = ( sourcedir => ".", target => "VC-WIN32", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -132,7 +132,7 @@ our %target = ( LDFLAGS => "/nologo /debug", MT => "mt", MTFLAGS => "-nologo", - RANLIB => "CODE(0x556b133e59b0)", + RANLIB => "CODE(0x5571a117bc70)", RC => "rc", _conf_fname_int => [ "Configurations/00-base-templates.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/shared-info.pl" ], aes_asm_src => "aes_core.c aes_cbc.c vpaes-x86.s aesni-x86.s", diff --git a/deps/openssl/config/archs/VC-WIN32/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/VC-WIN32/asm_avx2/crypto/buildinf.h index e86f307d3657d1..a3e79de82ccb99 100644 --- a/deps/openssl/config/archs/VC-WIN32/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/VC-WIN32/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: " -#define DATE "built on: Mon Mar 23 18:01:42 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:49 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/VC-WIN32/no-asm/configdata.pm b/deps/openssl/config/archs/VC-WIN32/no-asm/configdata.pm index 6d0fede2b4676c..eaaa4a550293d9 100644 --- a/deps/openssl/config/archs/VC-WIN32/no-asm/configdata.pm +++ b/deps/openssl/config/archs/VC-WIN32/no-asm/configdata.pm @@ -114,8 +114,8 @@ our %config = ( sourcedir => ".", target => "VC-WIN32", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -131,7 +131,7 @@ our %target = ( LDFLAGS => "/nologo /debug", MT => "mt", MTFLAGS => "-nologo", - RANLIB => "CODE(0x560a3912c520)", + RANLIB => "CODE(0x55fb6ffae020)", RC => "rc", _conf_fname_int => [ "Configurations/00-base-templates.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/shared-info.pl" ], aes_asm_src => "aes_core.c aes_cbc.c", diff --git a/deps/openssl/config/archs/VC-WIN32/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/VC-WIN32/no-asm/crypto/buildinf.h index 88530f5188add2..817bcd8bbc9bf2 100644 --- a/deps/openssl/config/archs/VC-WIN32/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/VC-WIN32/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: " -#define DATE "built on: Mon Mar 23 18:01:50 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:52 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/configdata.pm b/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/configdata.pm index 174ee6b9653c63..088a07a830ca5f 100644 --- a/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/configdata.pm +++ b/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/configdata.pm @@ -113,8 +113,8 @@ our %config = ( sourcedir => ".", target => "VC-WIN64-ARM", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -128,7 +128,7 @@ our %target = ( LDFLAGS => "/nologo /debug", MT => "mt", MTFLAGS => "-nologo", - RANLIB => "CODE(0x55dfe41016e0)", + RANLIB => "CODE(0x558d8c3b7990)", RC => "rc", _conf_fname_int => [ "Configurations/00-base-templates.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/50-win-onecore.conf", "Configurations/shared-info.pl" ], aes_asm_src => "aes_core.c aes_cbc.c", diff --git a/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/crypto/buildinf.h index 953068e86b36e1..fb65fa68266303 100644 --- a/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/VC-WIN64-ARM/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: VC-WIN64-ARM" -#define DATE "built on: Mon Mar 23 18:01:54 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:55 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/VC-WIN64A/asm/configdata.pm b/deps/openssl/config/archs/VC-WIN64A/asm/configdata.pm index 32bbc8e9d04f9d..32e5382acd0c56 100644 --- a/deps/openssl/config/archs/VC-WIN64A/asm/configdata.pm +++ b/deps/openssl/config/archs/VC-WIN64A/asm/configdata.pm @@ -116,8 +116,8 @@ our %config = ( sourcedir => ".", target => "VC-WIN64A", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -133,7 +133,7 @@ our %target = ( LDFLAGS => "/nologo /debug", MT => "mt", MTFLAGS => "-nologo", - RANLIB => "CODE(0x560ffc7fe780)", + RANLIB => "CODE(0x55c4d91a81f0)", RC => "rc", _conf_fname_int => [ "Configurations/00-base-templates.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/shared-info.pl" ], aes_asm_src => "aes_core.c aes_cbc.c vpaes-x86_64.s aesni-x86_64.s aesni-sha1-x86_64.s aesni-sha256-x86_64.s aesni-mb-x86_64.s", diff --git a/deps/openssl/config/archs/VC-WIN64A/asm/crypto/buildinf.h b/deps/openssl/config/archs/VC-WIN64A/asm/crypto/buildinf.h index 7ac964bd77c414..279ff8b8e2ceb8 100644 --- a/deps/openssl/config/archs/VC-WIN64A/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/VC-WIN64A/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: " -#define DATE "built on: Mon Mar 23 18:00:48 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:21 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/VC-WIN64A/asm/crypto/x86_64cpuid.asm b/deps/openssl/config/archs/VC-WIN64A/asm/crypto/x86_64cpuid.asm index f57d3210fc935f..440238dedf3267 100644 --- a/deps/openssl/config/archs/VC-WIN64A/asm/crypto/x86_64cpuid.asm +++ b/deps/openssl/config/archs/VC-WIN64A/asm/crypto/x86_64cpuid.asm @@ -478,3 +478,4 @@ $L$done_rdseed_bytes: xor r10,r10 DB 0F3h,0C3h ;repret + diff --git a/deps/openssl/config/archs/VC-WIN64A/asm_avx2/configdata.pm b/deps/openssl/config/archs/VC-WIN64A/asm_avx2/configdata.pm index 56d821583031f0..fa92f5f7c7d1c9 100644 --- a/deps/openssl/config/archs/VC-WIN64A/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/VC-WIN64A/asm_avx2/configdata.pm @@ -116,8 +116,8 @@ our %config = ( sourcedir => ".", target => "VC-WIN64A", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -133,7 +133,7 @@ our %target = ( LDFLAGS => "/nologo /debug", MT => "mt", MTFLAGS => "-nologo", - RANLIB => "CODE(0x560d1c7369e0)", + RANLIB => "CODE(0x556dc44c98b0)", RC => "rc", _conf_fname_int => [ "Configurations/00-base-templates.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/shared-info.pl" ], aes_asm_src => "aes_core.c aes_cbc.c vpaes-x86_64.s aesni-x86_64.s aesni-sha1-x86_64.s aesni-sha256-x86_64.s aesni-mb-x86_64.s", diff --git a/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/buildinf.h index 15684733fd72a1..3f0a9b7e3deb83 100644 --- a/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/buildinf.h +++
b/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: " -#define DATE "built on: Mon Mar 23 18:01:09 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:30 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/x86_64cpuid.asm b/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/x86_64cpuid.asm index f57d3210fc935f..440238dedf3267 100644 --- a/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/x86_64cpuid.asm +++ b/deps/openssl/config/archs/VC-WIN64A/asm_avx2/crypto/x86_64cpuid.asm @@ -478,3 +478,4 @@ $L$done_rdseed_bytes: xor r10,r10 DB 0F3h,0C3h ;repret + diff --git a/deps/openssl/config/archs/VC-WIN64A/no-asm/configdata.pm b/deps/openssl/config/archs/VC-WIN64A/no-asm/configdata.pm index 079d26caf926d8..8105ace326af9b 100644 --- a/deps/openssl/config/archs/VC-WIN64A/no-asm/configdata.pm +++ b/deps/openssl/config/archs/VC-WIN64A/no-asm/configdata.pm @@ -115,8 +115,8 @@ our %config = ( sourcedir => ".", target => "VC-WIN64A", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -132,7 +132,7 @@ our %target = ( LDFLAGS => "/nologo /debug", MT => "mt", MTFLAGS => "-nologo", - RANLIB => "CODE(0x564a5cb99b30)", + RANLIB => "CODE(0x5632a8aeeaf0)", RC => "rc", _conf_fname_int => [ "Configurations/00-base-templates.conf", "Configurations/00-base-templates.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/10-main.conf", "Configurations/shared-info.pl" ], aes_asm_src => "aes_core.c aes_cbc.c", diff --git a/deps/openssl/config/archs/VC-WIN64A/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/VC-WIN64A/no-asm/crypto/buildinf.h index b88907563ba47e..a9ec9e2ec52aed 100644 --- a/deps/openssl/config/archs/VC-WIN64A/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/VC-WIN64A/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: " -#define DATE "built on: Mon Mar 23 18:01:30 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:42 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/aix-gcc/asm/configdata.pm b/deps/openssl/config/archs/aix-gcc/asm/configdata.pm index a12c29be96ab63..cec54e11d4635c 100644 --- a/deps/openssl/config/archs/aix-gcc/asm/configdata.pm +++ b/deps/openssl/config/archs/aix-gcc/asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "aix-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/aix-gcc/asm/crypto/buildinf.h b/deps/openssl/config/archs/aix-gcc/asm/crypto/buildinf.h index 222b4a9dd11bf3..138705500c54ff 100644 --- a/deps/openssl/config/archs/aix-gcc/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/aix-gcc/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: aix-gcc" -#define DATE "built on: Mon Mar 23 17:49:29 2020 UTC" +#define DATE "built on: Wed Apr 1 16:05:51 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/aix-gcc/asm_avx2/configdata.pm b/deps/openssl/config/archs/aix-gcc/asm_avx2/configdata.pm index c4476ed295a978..4751cffbb70546 100644 --- a/deps/openssl/config/archs/aix-gcc/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/aix-gcc/asm_avx2/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "aix-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/aix-gcc/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/aix-gcc/asm_avx2/crypto/buildinf.h index bfd4894e5b9762..21a6ed2e88914f 100644 --- a/deps/openssl/config/archs/aix-gcc/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/aix-gcc/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: aix-gcc" -#define DATE "built on: Mon Mar 23 17:49:34 2020 UTC" +#define DATE "built on: Wed Apr 1 16:05:54 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/aix-gcc/no-asm/configdata.pm b/deps/openssl/config/archs/aix-gcc/no-asm/configdata.pm index d34e17b73db965..1256652c1cb360 100644 --- a/deps/openssl/config/archs/aix-gcc/no-asm/configdata.pm +++ b/deps/openssl/config/archs/aix-gcc/no-asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "aix-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/aix-gcc/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/aix-gcc/no-asm/crypto/buildinf.h index bd5ba8d392265d..f20c321546cc44 100644 --- a/deps/openssl/config/archs/aix-gcc/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/aix-gcc/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: aix-gcc" -#define DATE "built on: Mon Mar 23 17:49:38 2020 UTC" +#define DATE "built on: Wed Apr 1 16:05:57 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/aix64-gcc/asm/configdata.pm b/deps/openssl/config/archs/aix64-gcc/asm/configdata.pm index 543f5f18fd9994..92e9106a9ead07 100644 --- a/deps/openssl/config/archs/aix64-gcc/asm/configdata.pm +++ b/deps/openssl/config/archs/aix64-gcc/asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "aix64-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/aix64-gcc/asm/crypto/buildinf.h b/deps/openssl/config/archs/aix64-gcc/asm/crypto/buildinf.h index cc2451a5a2800f..5765cf1aab649e 100644 --- a/deps/openssl/config/archs/aix64-gcc/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/aix64-gcc/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: aix64-gcc" -#define DATE "built on: Mon Mar 23 17:49:40 2020 UTC" +#define DATE "built on: Wed Apr 1 16:05:59 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/aix64-gcc/asm_avx2/configdata.pm b/deps/openssl/config/archs/aix64-gcc/asm_avx2/configdata.pm index dbb53471a3123c..8c032eba466b40 100644 --- a/deps/openssl/config/archs/aix64-gcc/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/aix64-gcc/asm_avx2/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "aix64-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/aix64-gcc/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/aix64-gcc/asm_avx2/crypto/buildinf.h index 149edf87ca9ea1..dcf3ca12b83a4b 100644 --- a/deps/openssl/config/archs/aix64-gcc/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/aix64-gcc/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: aix64-gcc" -#define DATE "built on: Mon Mar 23 17:49:47 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:03 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/aix64-gcc/no-asm/configdata.pm b/deps/openssl/config/archs/aix64-gcc/no-asm/configdata.pm index 9ee1ca57aa5bf8..8d75398c293282 100644 --- a/deps/openssl/config/archs/aix64-gcc/no-asm/configdata.pm +++ b/deps/openssl/config/archs/aix64-gcc/no-asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "aix64-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/aix64-gcc/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/aix64-gcc/no-asm/crypto/buildinf.h index ae6762e6513b5e..f49ae47bb8a964 100644 --- a/deps/openssl/config/archs/aix64-gcc/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/aix64-gcc/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: aix64-gcc" -#define DATE "built on: Mon Mar 23 17:49:52 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:07 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/darwin-i386-cc/asm/configdata.pm b/deps/openssl/config/archs/darwin-i386-cc/asm/configdata.pm index c6da41afe902fa..0a76c54d0d66ce 100644 --- a/deps/openssl/config/archs/darwin-i386-cc/asm/configdata.pm +++ b/deps/openssl/config/archs/darwin-i386-cc/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "darwin-i386-cc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/darwin-i386-cc/asm/crypto/buildinf.h b/deps/openssl/config/archs/darwin-i386-cc/asm/crypto/buildinf.h index a6bea9391397e0..67bb3418cfb343 100644 --- a/deps/openssl/config/archs/darwin-i386-cc/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/darwin-i386-cc/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: darwin-i386-cc" -#define DATE "built on: Mon Mar 23 17:52:03 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:51 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/configdata.pm b/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/configdata.pm index f99baeab4e86e8..100cc17f36cce5 100644 --- a/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "darwin-i386-cc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/crypto/buildinf.h index d3a0b9bb3f528c..5c01deb249f26d 100644 --- a/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/darwin-i386-cc/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: darwin-i386-cc" -#define DATE "built on: Mon Mar 23 17:52:13 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:55 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/darwin-i386-cc/no-asm/configdata.pm b/deps/openssl/config/archs/darwin-i386-cc/no-asm/configdata.pm index 28f4dd04a918fe..55c903e9211ff2 100644 --- a/deps/openssl/config/archs/darwin-i386-cc/no-asm/configdata.pm +++ b/deps/openssl/config/archs/darwin-i386-cc/no-asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "darwin-i386-cc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/darwin-i386-cc/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/darwin-i386-cc/no-asm/crypto/buildinf.h index c30aa7ee289802..0efa934db23156 100644 --- a/deps/openssl/config/archs/darwin-i386-cc/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/darwin-i386-cc/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: darwin-i386-cc" -#define DATE "built on: Mon Mar 23 17:52:22 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:59 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/configdata.pm b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/configdata.pm index b59cd82d4d1d34..3d64e504b2b12b 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/configdata.pm +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "darwin64-x86_64-cc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha1-x86_64.s index 738aa6dc53b327..b0e1036b92e962 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha1-x86_64.s @@ -3020,3 +3020,4 @@ L$aesenclast14: movd %xmm9,16(%r9) .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha256-x86_64.s index 3aee22c3bc57a8..cbf5ae8eb01d5c 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/aes/aesni-sha256-x86_64.s @@ -4432,3 +4432,4 @@ L$aesenclast4: movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/buildinf.h b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/buildinf.h index 503c82f384ddf2..346e1e6ea61474 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: darwin64-x86_64-cc" -#define DATE "built on: Mon Mar 23 17:51:02 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:30 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/poly1305/poly1305-x86_64.s index 19e55e1d329afe..9cf31326ce3177 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/poly1305/poly1305-x86_64.s @@ -3551,3 +3551,4 @@ L$done_dec: movq %rdx,%rax .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/x86_64cpuid.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/x86_64cpuid.s index 32a92da69f0b8c..41e09df8a9705a 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm/crypto/x86_64cpuid.s @@ -479,3 +479,4 @@ L$done_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/configdata.pm b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/configdata.pm index 145de5345ab388..65df8ff59e7be8 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "darwin64-x86_64-cc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s index 738aa6dc53b327..b0e1036b92e962 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s @@ -3020,3 +3020,4 @@ L$aesenclast14: movd %xmm9,16(%r9) .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s index 3aee22c3bc57a8..cbf5ae8eb01d5c 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s @@ -4432,3 +4432,4 @@ L$aesenclast4: movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/buildinf.h index 9ad23593e4a5f2..6536df14a452c6 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: darwin64-x86_64-cc" -#define DATE "built on: Mon Mar 23 17:51:26 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:40 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/poly1305/poly1305-x86_64.s index f6afad8ff0c006..a5d240b7462795 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/poly1305/poly1305-x86_64.s @@ -2035,3 +2035,4 @@ L$done_dec: movq %rdx,%rax .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/x86_64cpuid.s b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/x86_64cpuid.s index 32a92da69f0b8c..41e09df8a9705a 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/asm_avx2/crypto/x86_64cpuid.s @@ -479,3 +479,4 @@ L$done_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 + diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/configdata.pm b/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/configdata.pm index 2ef91b8727659f..6a498a6a25dcbe 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/configdata.pm +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "darwin64-x86_64-cc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/crypto/buildinf.h index a37361590ad7f0..d0023df82d7a67 100644 --- a/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/darwin64-x86_64-cc/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: darwin64-x86_64-cc" -#define DATE "built on: Mon Mar 23 17:51:58 2020 UTC" +#define DATE "built on: Wed Apr 1 16:06:49 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-aarch64/asm/configdata.pm b/deps/openssl/config/archs/linux-aarch64/asm/configdata.pm index 4b49e615105952..5735b3fd999b70 100644 --- a/deps/openssl/config/archs/linux-aarch64/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-aarch64/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-aarch64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-aarch64/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-aarch64/asm/crypto/buildinf.h index 80d887b8ec1c70..9ec9564deeb463 100644 --- a/deps/openssl/config/archs/linux-aarch64/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-aarch64/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-aarch64" -#define DATE "built on: Mon Mar 23 17:52:27 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:01 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/linux-aarch64/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-aarch64/asm_avx2/configdata.pm index 7656377489e1f2..218b643ab58dcf 100644 --- a/deps/openssl/config/archs/linux-aarch64/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux-aarch64/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-aarch64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-aarch64/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-aarch64/asm_avx2/crypto/buildinf.h index 766e6ff40b77c2..a3dfbc30cf1682 100644 --- a/deps/openssl/config/archs/linux-aarch64/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-aarch64/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-aarch64" -#define DATE "built on: Mon Mar 23 17:52:37 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:05 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-aarch64/no-asm/configdata.pm b/deps/openssl/config/archs/linux-aarch64/no-asm/configdata.pm index 45d80272885160..d605c941f89716 100644 --- a/deps/openssl/config/archs/linux-aarch64/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-aarch64/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-aarch64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-aarch64/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-aarch64/no-asm/crypto/buildinf.h index 18b039d7163e69..cd67300a092d37 100644 --- a/deps/openssl/config/archs/linux-aarch64/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-aarch64/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-aarch64" -#define DATE "built on: Mon Mar 23 17:52:48 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:08 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-armv4/asm/configdata.pm b/deps/openssl/config/archs/linux-armv4/asm/configdata.pm index 8cc20561335e9e..96991e128d99b1 100644 --- a/deps/openssl/config/archs/linux-armv4/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-armv4/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-armv4", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-armv4/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-armv4/asm/crypto/buildinf.h index 8f502fae8c87ca..9b1f6c03aeb125 100644 --- a/deps/openssl/config/archs/linux-armv4/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-armv4/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-armv4" -#define DATE "built on: Mon Mar 23 17:52:51 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:10 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/linux-armv4/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-armv4/asm_avx2/configdata.pm index c0273c7ac8214a..0083ccfc906e26 100644 --- a/deps/openssl/config/archs/linux-armv4/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux-armv4/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-armv4", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-armv4/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-armv4/asm_avx2/crypto/buildinf.h index af26cc4faaba76..111763d1763f48 100644 --- a/deps/openssl/config/archs/linux-armv4/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-armv4/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-armv4" -#define DATE "built on: Mon Mar 23 17:52:56 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:14 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-armv4/no-asm/configdata.pm b/deps/openssl/config/archs/linux-armv4/no-asm/configdata.pm index eaea0bf537faf6..c7439e7ee4a04d 100644 --- a/deps/openssl/config/archs/linux-armv4/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-armv4/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-armv4", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-armv4/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-armv4/no-asm/crypto/buildinf.h index 1071ca64f4f66f..dcd0cefc817be2 100644 --- a/deps/openssl/config/archs/linux-armv4/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-armv4/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-armv4" -#define DATE "built on: Mon Mar 23 17:53:01 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:17 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-elf/asm/configdata.pm b/deps/openssl/config/archs/linux-elf/asm/configdata.pm index 546b7e163a1216..91efdb2b65e34d 100644 --- a/deps/openssl/config/archs/linux-elf/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-elf/asm/configdata.pm @@ -112,8 +112,8 @@ our %config = ( sourcedir => ".", target => "linux-elf", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-elf/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-elf/asm/crypto/buildinf.h index 30d3d94b4399e9..1e9173a9a2b7fa 100644 --- a/deps/openssl/config/archs/linux-elf/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-elf/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-elf" -#define DATE "built on: Mon Mar 23 17:53:04 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:19 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/linux-elf/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-elf/asm_avx2/configdata.pm index b09fddeec6b8a4..db41d3c6d683aa 100644 --- a/deps/openssl/config/archs/linux-elf/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux-elf/asm_avx2/configdata.pm @@ -112,8 +112,8 @@ our %config = ( sourcedir => ".", target => "linux-elf", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-elf/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-elf/asm_avx2/crypto/buildinf.h index 8108bb02d31d13..126b123814f905 100644 --- a/deps/openssl/config/archs/linux-elf/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-elf/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-elf" -#define DATE "built on: Mon Mar 23 17:53:10 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:23 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-elf/no-asm/configdata.pm b/deps/openssl/config/archs/linux-elf/no-asm/configdata.pm index 09e453044d7135..69ea1a1058915a 100644 --- a/deps/openssl/config/archs/linux-elf/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-elf/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-elf", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-elf/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-elf/no-asm/crypto/buildinf.h index 7bea1fe229707f..9b24510752e746 100644 --- a/deps/openssl/config/archs/linux-elf/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-elf/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-elf" -#define DATE "built on: Mon Mar 23 17:53:17 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:27 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-ppc/asm/configdata.pm b/deps/openssl/config/archs/linux-ppc/asm/configdata.pm index 3563067771dd00..8098ae31dbb37b 100644 --- a/deps/openssl/config/archs/linux-ppc/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc/asm/crypto/buildinf.h index 4f70555d14b0dc..73d97f3a9b4796 100644 --- a/deps/openssl/config/archs/linux-ppc/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc" -#define DATE "built on: Mon Mar 23 17:55:24 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:10 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/linux-ppc/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-ppc/asm_avx2/configdata.pm index db65e88ce7d3b3..2a7beb0f47caa8 100644 --- a/deps/openssl/config/archs/linux-ppc/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc/asm_avx2/crypto/buildinf.h index 5772f225114ffd..87be02dee4e5ba 100644 --- a/deps/openssl/config/archs/linux-ppc/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc" -#define DATE "built on: Mon Mar 23 17:55:28 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:13 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-ppc/no-asm/configdata.pm b/deps/openssl/config/archs/linux-ppc/no-asm/configdata.pm index 20a0a9632d7c45..8fa9a01df8c422 100644 --- a/deps/openssl/config/archs/linux-ppc/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc/no-asm/crypto/buildinf.h index 189f4f913dac7c..f4fe911816b716 100644 --- a/deps/openssl/config/archs/linux-ppc/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc" -#define DATE "built on: Mon Mar 23 17:55:34 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:17 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-ppc64/asm/configdata.pm b/deps/openssl/config/archs/linux-ppc64/asm/configdata.pm index 3fb5832578408b..646d3b46d59d42 100644 --- a/deps/openssl/config/archs/linux-ppc64/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc64/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc64/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc64/asm/crypto/buildinf.h index a5c60310dbb980..2b089d0cd1be70 100644 --- a/deps/openssl/config/archs/linux-ppc64/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc64/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc64" -#define DATE "built on: Mon Mar 23 17:55:38 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:19 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
diff --git a/deps/openssl/config/archs/linux-ppc64/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-ppc64/asm_avx2/configdata.pm index b9823cc1c5bad2..4f815cad099260 100644 --- a/deps/openssl/config/archs/linux-ppc64/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc64/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc64/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc64/asm_avx2/crypto/buildinf.h index 82b92e5e17400c..ae6f43242f9b39 100644 --- a/deps/openssl/config/archs/linux-ppc64/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc64/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc64" -#define DATE "built on: Mon Mar 23 17:55:50 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:22 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-ppc64/no-asm/configdata.pm b/deps/openssl/config/archs/linux-ppc64/no-asm/configdata.pm index a7a9844a1e8c12..84245e17c0a4a7 100644 --- a/deps/openssl/config/archs/linux-ppc64/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc64/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc64/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc64/no-asm/crypto/buildinf.h index f82aa75315ad6f..cfc9142e8477cb 100644 --- a/deps/openssl/config/archs/linux-ppc64/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc64/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc64" -#define DATE "built on: Mon Mar 23 17:56:09 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:26 2020 UTC" /* * Generate compiler_flags as an array of individual characters. diff --git a/deps/openssl/config/archs/linux-ppc64le/asm/configdata.pm b/deps/openssl/config/archs/linux-ppc64le/asm/configdata.pm index 385fabbbb0c074..007265ff1b50d1 100644 --- a/deps/openssl/config/archs/linux-ppc64le/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc64le/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc64le", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc64le/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc64le/asm/crypto/buildinf.h index 97f5e040319ac8..d535a10d2fb7a2 100644 --- a/deps/openssl/config/archs/linux-ppc64le/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc64le/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc64le" -#define DATE "built on: Mon Mar 23 17:56:24 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:28 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
This is a diff --git a/deps/openssl/config/archs/linux-ppc64le/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-ppc64le/asm_avx2/configdata.pm index f73e1f8355291d..989a44049cffcc 100644 --- a/deps/openssl/config/archs/linux-ppc64le/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc64le/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc64le", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc64le/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc64le/asm_avx2/crypto/buildinf.h index 8062de1ac15746..8ee145647a4d81 100644 --- a/deps/openssl/config/archs/linux-ppc64le/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc64le/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc64le" -#define DATE "built on: Mon Mar 23 17:56:43 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:32 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/linux-ppc64le/no-asm/configdata.pm b/deps/openssl/config/archs/linux-ppc64le/no-asm/configdata.pm index 9fbc9449265923..7a94d38534ff09 100644 --- a/deps/openssl/config/archs/linux-ppc64le/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-ppc64le/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-ppc64le", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-ppc64le/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-ppc64le/no-asm/crypto/buildinf.h index c51d77d520a325..595b71f88df238 100644 --- a/deps/openssl/config/archs/linux-ppc64le/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-ppc64le/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-ppc64le" -#define DATE "built on: Mon Mar 23 17:56:56 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:35 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux-x32/asm/configdata.pm b/deps/openssl/config/archs/linux-x32/asm/configdata.pm index c4ff862e01cdf1..1562436fa84ad2 100644 --- a/deps/openssl/config/archs/linux-x32/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-x32/asm/configdata.pm @@ -112,8 +112,8 @@ our %config = ( sourcedir => ".", target => "linux-x32", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha1-x86_64.s index 978bd2b6239c15..a38e21f0484e24 100644 --- a/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha1-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha1_enc,@function .align 32 aesni_cbc_sha1_enc: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+0(%rip),%r10d movq OPENSSL_ia32cap_P+4(%rip),%r11 @@ -18,7 +18,7 @@ aesni_cbc_sha1_enc: je aesni_cbc_sha1_enc_avx jmp aesni_cbc_sha1_enc_ssse3 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc .type aesni_cbc_sha1_enc_ssse3,@function .align 32 @@ -2732,7 +2732,7 @@ K_XX_XX: .type aesni_cbc_sha1_enc_shaext,@function .align 32 aesni_cbc_sha1_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 movdqu (%r9),%xmm8 movd 16(%r9),%xmm9 @@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext: movdqu %xmm8,(%r9) movd %xmm9,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha256-x86_64.s index dd09f1b290af62..3e56a82578a354 100644 --- a/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-sha256-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha256_enc,@function .align 16 aesni_cbc_sha256_enc: -.cfi_startproc +.cfi_startproc leaq OPENSSL_ia32cap_P(%rip),%r11 movl $1,%eax cmpq $0,%rdi @@ -31,7 +31,7 @@ aesni_cbc_sha256_enc: ud2 .Lprobe: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc .align 64 @@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2: .type aesni_cbc_sha256_enc_shaext,@function .align 32 aesni_cbc_sha256_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 leaq K256+128(%rip),%rax movdqu (%r9),%xmm1 @@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext: movdqu %xmm1,(%r9) movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-x86_64.s index c1e791eff59235..1a4b22e7b8a5d9 100644 --- a/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-x86_64.s +++ b/deps/openssl/config/archs/linux-x32/asm/crypto/aes/aesni-x86_64.s @@ -861,7 +861,7 @@ aesni_ecb_encrypt: .type aesni_ccm64_encrypt_blocks,@function .align 16 aesni_ccm64_encrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movdqu (%r8),%xmm6 movdqa .Lincrement64(%rip),%xmm9 @@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size 
aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks .globl aesni_ccm64_decrypt_blocks .type aesni_ccm64_decrypt_blocks,@function .align 16 aesni_ccm64_decrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movups (%r8),%xmm6 movdqu (%r9),%xmm3 @@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks .globl aesni_ctr32_encrypt_blocks .type aesni_ctr32_encrypt_blocks,@function @@ -2794,7 +2794,7 @@ aesni_ocb_encrypt: .type __ocb_encrypt6,@function .align 32 __ocb_encrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2892,13 +2892,13 @@ __ocb_encrypt6: .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt6,.-__ocb_encrypt6 .type __ocb_encrypt4,@function .align 32 __ocb_encrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2963,13 +2963,13 @@ __ocb_encrypt4: .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt4,.-__ocb_encrypt4 .type __ocb_encrypt1,@function .align 32 __ocb_encrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm2,%xmm8 @@ -3000,7 +3000,7 @@ __ocb_encrypt1: .byte 102,15,56,221,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt1,.-__ocb_encrypt1 .globl aesni_ocb_decrypt @@ -3243,7 +3243,7 @@ aesni_ocb_decrypt: .type __ocb_decrypt6,@function .align 32 __ocb_decrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3335,13 +3335,13 @@ __ocb_decrypt6: .byte 102,65,15,56,223,246 .byte 102,65,15,56,223,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt6,.-__ocb_decrypt6 .type __ocb_decrypt4,@function .align 32 __ocb_decrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3402,13 +3402,13 @@ __ocb_decrypt4: .byte 102,65,15,56,223,228 .byte 102,65,15,56,223,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt4,.-__ocb_decrypt4 .type __ocb_decrypt1,@function .align 32 __ocb_decrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm7,%xmm2 @@ -3438,7 +3438,7 @@ __ocb_decrypt1: .byte 102,15,56,223,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt1,.-__ocb_decrypt1 .globl aesni_cbc_encrypt .type aesni_cbc_encrypt,@function @@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_set_encrypt_key,.-aesni_set_encrypt_key .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key .align 64 diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/bn/rsaz-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/bn/rsaz-x86_64.s index 7876e0b8f93d9c..d5025b23cd668e 100644 --- a/deps/openssl/config/archs/linux-x32/asm/crypto/bn/rsaz-x86_64.s +++ b/deps/openssl/config/archs/linux-x32/asm/crypto/bn/rsaz-x86_64.s @@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one: .type __rsaz_512_reduce,@function .align 32 __rsaz_512_reduce: -.cfi_startproc +.cfi_startproc movq %r8,%rbx imulq 128+8(%rsp),%rbx movq 0(%rbp),%rax @@ -1533,12 +1533,12 @@ __rsaz_512_reduce: jne .Lreduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reduce,.-__rsaz_512_reduce .type __rsaz_512_reducex,@function 
 .align 32
 __rsaz_512_reducex:
-.cfi_startproc
+.cfi_startproc
 imulq %r8,%rdx
 xorq %rsi,%rsi
@@ -1591,12 +1591,12 @@ __rsaz_512_reducex:
 jne .Lreduction_loopx
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_reducex,.-__rsaz_512_reducex
 .type __rsaz_512_subtract,@function
 .align 32
 __rsaz_512_subtract:
-.cfi_startproc
+.cfi_startproc
 movq %r8,(%rdi)
 movq %r9,8(%rdi)
 movq %r10,16(%rdi)
@@ -1650,12 +1650,12 @@ __rsaz_512_subtract:
 movq %r15,56(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_subtract,.-__rsaz_512_subtract
 .type __rsaz_512_mul,@function
 .align 32
 __rsaz_512_mul:
-.cfi_startproc
+.cfi_startproc
 leaq 8(%rsp),%rdi
 movq (%rsi),%rax
@@ -1794,12 +1794,12 @@ __rsaz_512_mul:
 movq %r15,56(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_mul,.-__rsaz_512_mul
 .type __rsaz_512_mulx,@function
 .align 32
 __rsaz_512_mulx:
-.cfi_startproc
+.cfi_startproc
 mulxq (%rsi),%rbx,%r8
 movq $-6,%rcx
@@ -1916,13 +1916,13 @@ __rsaz_512_mulx:
 movq %r15,8+64+56(%rsp)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_mulx,.-__rsaz_512_mulx
 .globl rsaz_512_scatter4
 .type rsaz_512_scatter4,@function
 .align 16
 rsaz_512_scatter4:
-.cfi_startproc
+.cfi_startproc
 leaq (%rdi,%rdx,8),%rdi
 movl $8,%r9d
 jmp .Loop_scatter
@@ -1935,14 +1935,14 @@ rsaz_512_scatter4:
 decl %r9d
 jnz .Loop_scatter
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size rsaz_512_scatter4,.-rsaz_512_scatter4
 .globl rsaz_512_gather4
 .type rsaz_512_gather4,@function
 .align 16
 rsaz_512_gather4:
-.cfi_startproc
+.cfi_startproc
 movd %edx,%xmm8
 movdqa .Linc+16(%rip),%xmm1
 movdqa .Linc(%rip),%xmm0
@@ -2006,7 +2006,7 @@ rsaz_512_gather4:
 jnz .Loop_gather
 .byte 0xf3,0xc3
 .LSEH_end_rsaz_512_gather4:
-.cfi_endproc
+.cfi_endproc
 .size rsaz_512_gather4,.-rsaz_512_gather4
 .align 64
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/linux-x32/asm/crypto/bn/x86_64-mont5.s
index 40a60a3c8fc6b9..ab93b02d8c1aae 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/bn/x86_64-mont5.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/bn/x86_64-mont5.s
@@ -550,7 +550,7 @@ bn_mul4x_mont_gather5:
 .type mul4x_internal,@function
 .align 32
 mul4x_internal:
-.cfi_startproc
+.cfi_startproc
 shlq $5,%r9
 movd 8(%rax),%xmm5
 leaq .Linc(%rip),%rax
@@ -1072,7 +1072,7 @@ mul4x_internal:
 movq 16(%rbp),%r14
 movq 24(%rbp),%r15
 jmp .Lsqr4x_sub_entry
-.cfi_endproc
+.cfi_endproc
 .size mul4x_internal,.-mul4x_internal
 .globl bn_power5
 .type bn_power5,@function
@@ -1215,7 +1215,7 @@ bn_power5:
 .align 32
 bn_sqr8x_internal:
 __bn_sqr8x_internal:
-.cfi_startproc
+.cfi_startproc
@@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction:
 cmpq %rdx,%rdi
 jb .L8x_reduction_loop
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_sqr8x_internal,.-bn_sqr8x_internal
 .type __bn_post4x_internal,@function
 .align 32
 __bn_post4x_internal:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rbp),%r12
 leaq (%rdi,%r9,1),%rbx
 movq %r9,%rcx
@@ -2046,18 +2046,18 @@ __bn_post4x_internal:
 movq %r9,%r10
 negq %r9
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __bn_post4x_internal,.-__bn_post4x_internal
 .globl bn_from_montgomery
 .type bn_from_montgomery,@function
 .align 32
 bn_from_montgomery:
-.cfi_startproc
+.cfi_startproc
 testl $7,%r9d
 jz bn_from_mont8x
 xorl %eax,%eax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_from_montgomery,.-bn_from_montgomery
 .type bn_from_mont8x,@function
@@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5:
 .type mulx4x_internal,@function
 .align 32
 mulx4x_internal:
-.cfi_startproc
+.cfi_startproc
 movq %r9,8(%rsp)
 movq %r9,%r10
 negq %r9
@@ -2760,7 +2760,7 @@ mulx4x_internal:
 movq 16(%rbp),%r14
 movq 24(%rbp),%r15
 jmp .Lsqrx4x_sub_entry
-.cfi_endproc
+.cfi_endproc
 .size mulx4x_internal,.-mulx4x_internal
 .type bn_powerx5,@function
 .align 32
@@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction:
 .size bn_sqrx8x_internal,.-bn_sqrx8x_internal
 .align 32
 __bn_postx4x_internal:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rbp),%r12
 movq %rcx,%r10
 movq %rcx,%r9
@@ -3567,13 +3567,13 @@ __bn_postx4x_internal:
 negq %r9
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __bn_postx4x_internal,.-__bn_postx4x_internal
 .globl bn_get_bits5
 .type bn_get_bits5,@function
 .align 16
 bn_get_bits5:
-.cfi_startproc
+.cfi_startproc
 leaq 0(%rdi),%r10
 leaq 1(%rdi),%r11
 movl %esi,%ecx
@@ -3587,14 +3587,14 @@ bn_get_bits5:
 shrl %cl,%eax
 andl $31,%eax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_get_bits5,.-bn_get_bits5
 .globl bn_scatter5
 .type bn_scatter5,@function
 .align 16
 bn_scatter5:
-.cfi_startproc
+.cfi_startproc
 cmpl $0,%esi
 jz .Lscatter_epilogue
 leaq (%rdx,%rcx,8),%rdx
@@ -3607,7 +3607,7 @@ bn_scatter5:
 jnz .Lscatter
 .Lscatter_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_scatter5,.-bn_scatter5
 .globl bn_gather5
@@ -3615,7 +3615,7 @@ bn_scatter5:
 .align 32
 bn_gather5:
 .LSEH_begin_bn_gather5:
-.cfi_startproc
+.cfi_startproc
 .byte 0x4c,0x8d,0x14,0x24
 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
@@ -3773,7 +3773,7 @@ bn_gather5:
 leaq (%r10),%rsp
 .byte 0xf3,0xc3
 .LSEH_end_bn_gather5:
-.cfi_endproc
+.cfi_endproc
 .size bn_gather5,.-bn_gather5
 .align 64
 .Linc:
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-x32/asm/crypto/buildinf.h
index 469e96e5686574..b6469a1db3faa7 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/buildinf.h
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/buildinf.h
@@ -11,7 +11,7 @@
 */
 #define PLATFORM "platform: linux-x32"
-#define DATE "built on: Mon Mar 23 17:53:20 2020 UTC"
+#define DATE "built on: Wed Apr 1 16:07:29 2020 UTC"
 /*
 * Generate compiler_flags as an array of individual characters. This is a
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/camellia/cmll-x86_64.s
index eeb20dd2291da7..92056f8b1e0e0e 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/camellia/cmll-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/camellia/cmll-x86_64.s
@@ -5,13 +5,13 @@
 .type Camellia_EncryptBlock,@function
 .align 16
 Camellia_EncryptBlock:
-.cfi_startproc
+.cfi_startproc
 movl $128,%eax
 subl %edi,%eax
 movl $3,%edi
 adcl $0,%edi
 jmp .Lenc_rounds
-.cfi_endproc
+.cfi_endproc
 .size Camellia_EncryptBlock,.-Camellia_EncryptBlock
 .globl Camellia_EncryptBlock_Rounds
@@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds:
 .type _x86_64_Camellia_encrypt,@function
 .align 16
 _x86_64_Camellia_encrypt:
-.cfi_startproc
+.cfi_startproc
 xorl 0(%r14),%r9d
 xorl 4(%r14),%r8d
 xorl 8(%r14),%r11d
@@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt:
 movl %edx,%r11d
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt
@@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt:
 .type Camellia_DecryptBlock,@function
 .align 16
 Camellia_DecryptBlock:
-.cfi_startproc
+.cfi_startproc
 movl $128,%eax
 subl %edi,%eax
 movl $3,%edi
 adcl $0,%edi
 jmp .Ldec_rounds
-.cfi_endproc
+.cfi_endproc
 .size Camellia_DecryptBlock,.-Camellia_DecryptBlock
 .globl Camellia_DecryptBlock_Rounds
@@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds:
 .type _x86_64_Camellia_decrypt,@function
 .align 16
 _x86_64_Camellia_decrypt:
-.cfi_startproc
+.cfi_startproc
 xorl 0(%r14),%r9d
 xorl 4(%r14),%r8d
 xorl 8(%r14),%r11d
@@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt:
 movl %ebx,%r11d
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt
 .globl Camellia_Ekeygen
 .type Camellia_Ekeygen,@function
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/ec/ecp_nistz256-x86_64.s
index 5c9e4050416212..80569cae04667e 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/ec/ecp_nistz256-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/ec/ecp_nistz256-x86_64.s
@@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx:
 .type ecp_nistz256_to_mont,@function
 .align 32
 ecp_nistz256_to_mont:
-.cfi_startproc
+.cfi_startproc
 movl $0x80100,%ecx
 andl OPENSSL_ia32cap_P+8(%rip),%ecx
 leaq .LRR(%rip),%rdx
 jmp .Lmul_mont
-.cfi_endproc
+.cfi_endproc
 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
@@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont:
 .type ecp_nistz256_scatter_w5,@function
 .align 32
 ecp_nistz256_scatter_w5:
-.cfi_startproc
+.cfi_startproc
 leal -3(%rdx,%rdx,2),%edx
 movdqa 0(%rsi),%xmm0
 shll $5,%edx
@@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5:
 movdqa %xmm5,80(%rdi,%rdx,1)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
@@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5:
 .type ecp_nistz256_scatter_w7,@function
 .align 32
 ecp_nistz256_scatter_w7:
-.cfi_startproc
+.cfi_startproc
 movdqu 0(%rsi),%xmm0
 shll $6,%edx
 movdqu 16(%rsi),%xmm1
@@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7:
 movdqa %xmm3,48(%rdi,%rdx,1)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/ec/x25519-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/ec/x25519-x86_64.s
index 1788e568cda5d2..8fd319c83c880d 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/ec/x25519-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/ec/x25519-x86_64.s
@@ -400,14 +400,14 @@ x25519_fe51_mul121666:
 .type x25519_fe64_eligible,@function
 .align 32
 x25519_fe64_eligible:
-.cfi_startproc
+.cfi_startproc
 movl OPENSSL_ia32cap_P+8(%rip),%ecx
 xorl %eax,%eax
 andl $0x80100,%ecx
 cmpl $0x80100,%ecx
 cmovel %ecx,%eax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_eligible,.-x25519_fe64_eligible
 .globl x25519_fe64_mul
@@ -650,7 +650,7 @@ x25519_fe64_sqr:
 .align 32
 x25519_fe64_mul121666:
 .Lfe64_mul121666_body:
-.cfi_startproc
+.cfi_startproc
 movl $121666,%edx
 mulxq 0(%rsi),%r8,%rcx
 mulxq 8(%rsi),%r9,%rax
@@ -679,7 +679,7 @@ x25519_fe64_mul121666:
 .Lfe64_mul121666_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_mul121666,.-x25519_fe64_mul121666
 .globl x25519_fe64_add
@@ -687,7 +687,7 @@ x25519_fe64_mul121666:
 .align 32
 x25519_fe64_add:
 .Lfe64_add_body:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rsi),%r8
 movq 8(%rsi),%r9
 movq 16(%rsi),%r10
@@ -716,7 +716,7 @@ x25519_fe64_add:
 .Lfe64_add_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_add,.-x25519_fe64_add
 .globl x25519_fe64_sub
@@ -724,7 +724,7 @@ x25519_fe64_add:
 .align 32
 x25519_fe64_sub:
 .Lfe64_sub_body:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rsi),%r8
 movq 8(%rsi),%r9
 movq 16(%rsi),%r10
@@ -753,7 +753,7 @@ x25519_fe64_sub:
 .Lfe64_sub_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_sub,.-x25519_fe64_sub
 .globl x25519_fe64_tobytes
@@ -761,7 +761,7 @@ x25519_fe64_sub:
 .align 32
 x25519_fe64_tobytes:
 .Lfe64_to_body:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rsi),%r8
 movq 8(%rsi),%r9
 movq 16(%rsi),%r10
@@ -797,6 +797,6 @@ x25519_fe64_tobytes:
 .Lfe64_to_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_tobytes,.-x25519_fe64_tobytes
 .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/modes/aesni-gcm-x86_64.s
index 01d89630a42f73..bf508aff6ff6ec 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/modes/aesni-gcm-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/modes/aesni-gcm-x86_64.s
@@ -3,7 +3,7 @@
 .type _aesni_ctr32_ghash_6x,@function
 .align 32
 _aesni_ctr32_ghash_6x:
-.cfi_startproc
+.cfi_startproc
 vmovdqu 32(%r11),%xmm2
 subq $6,%rdx
 vpxor %xmm4,%xmm4,%xmm4
@@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x:
 vpxor %xmm4,%xmm8,%xmm8
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
 .globl aesni_gcm_decrypt
 .type aesni_gcm_decrypt,@function
@@ -418,7 +418,7 @@ aesni_gcm_decrypt:
 .type _aesni_ctr32_6x,@function
 .align 32
 _aesni_ctr32_6x:
-.cfi_startproc
+.cfi_startproc
 vmovdqu 0-128(%rcx),%xmm4
 vmovdqu 32(%r11),%xmm2
 leaq -1(%rbp),%r13
@@ -505,7 +505,7 @@ _aesni_ctr32_6x:
 vpshufb %xmm0,%xmm1,%xmm1
 vpxor %xmm4,%xmm14,%xmm14
 jmp .Loop_ctr32
-.cfi_endproc
+.cfi_endproc
 .size _aesni_ctr32_6x,.-_aesni_ctr32_6x
 .globl aesni_gcm_encrypt
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/poly1305/poly1305-x86_64.s
index 0cdd406e84740d..4a6a34eee14ce6 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/poly1305/poly1305-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/poly1305/poly1305-x86_64.s
@@ -12,7 +12,7 @@
 .type poly1305_init,@function
 .align 32
 poly1305_init:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 movq %rax,0(%rdi)
 movq %rax,8(%rdi)
@@ -48,7 +48,7 @@ poly1305_init:
 movl $1,%eax
 .Lno_key:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_init,.-poly1305_init
 .type poly1305_blocks,@function
@@ -169,7 +169,7 @@ poly1305_blocks:
 .type poly1305_emit,@function
 .align 32
 poly1305_emit:
-.cfi_startproc
+.cfi_startproc
 .Lemit:
 movq 0(%rdi),%r8
 movq 8(%rdi),%r9
@@ -190,12 +190,12 @@ poly1305_emit:
 movq %rcx,8(%rsi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_emit,.-poly1305_emit
 .type __poly1305_block,@function
 .align 32
 __poly1305_block:
-.cfi_startproc
+.cfi_startproc
 mulq %r14
 movq %rax,%r9
 movq %r11,%rax
@@ -235,13 +235,13 @@ __poly1305_block:
 adcq $0,%rbx
 adcq $0,%rbp
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __poly1305_block,.-__poly1305_block
 .type __poly1305_init_avx,@function
 .align 32
 __poly1305_init_avx:
-.cfi_startproc
+.cfi_startproc
 movq %r11,%r14
 movq %r12,%rbx
 xorq %rbp,%rbp
@@ -399,7 +399,7 @@ __poly1305_init_avx:
 leaq -48-64(%rdi),%rdi
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __poly1305_init_avx,.-__poly1305_init_avx
 .type poly1305_blocks_avx,@function
@@ -1240,7 +1240,7 @@ poly1305_blocks_avx:
 .type poly1305_emit_avx,@function
 .align 32
 poly1305_emit_avx:
-.cfi_startproc
+.cfi_startproc
 cmpl $0,20(%rdi)
 je .Lemit
@@ -1291,7 +1291,7 @@ poly1305_emit_avx:
 movq %rcx,8(%rsi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_emit_avx,.-poly1305_emit_avx
 .type poly1305_blocks_avx2,@function
 .align 32
@@ -2488,7 +2488,7 @@ poly1305_blocks_avx512:
 .type poly1305_init_base2_44,@function
 .align 32
 poly1305_init_base2_44:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 movq %rax,0(%rdi)
 movq %rax,8(%rdi)
@@ -2522,12 +2522,12 @@ poly1305_init_base2_44:
 movl %r11d,4(%rdx)
 movl $1,%eax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_init_base2_44,.-poly1305_init_base2_44
 .type poly1305_blocks_vpmadd52,@function
 .align 32
 poly1305_blocks_vpmadd52:
-.cfi_startproc
+.cfi_startproc
 shrq $4,%rdx
 jz .Lno_data_vpmadd52
@@ -2634,12 +2634,12 @@ poly1305_blocks_vpmadd52:
 .Lno_data_vpmadd52:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
 .type poly1305_blocks_vpmadd52_4x,@function
 .align 32
 poly1305_blocks_vpmadd52_4x:
-.cfi_startproc
+.cfi_startproc
 shrq $4,%rdx
 jz .Lno_data_vpmadd52_4x
@@ -3064,12 +3064,12 @@ poly1305_blocks_vpmadd52_4x:
 .Lno_data_vpmadd52_4x:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
 .type poly1305_blocks_vpmadd52_8x,@function
 .align 32
 poly1305_blocks_vpmadd52_8x:
-.cfi_startproc
+.cfi_startproc
 shrq $4,%rdx
 jz .Lno_data_vpmadd52_8x
@@ -3410,12 +3410,12 @@ poly1305_blocks_vpmadd52_8x:
 .Lno_data_vpmadd52_8x:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
 .type poly1305_emit_base2_44,@function
 .align 32
 poly1305_emit_base2_44:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rdi),%r8
 movq 8(%rdi),%r9
 movq 16(%rdi),%r10
@@ -3446,7 +3446,7 @@ poly1305_emit_base2_44:
 movq %rcx,8(%rsi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_emit_base2_44,.-poly1305_emit_base2_44
 .align 64
 .Lconst:
@@ -3485,7 +3485,7 @@ poly1305_emit_base2_44:
 .type xor128_encrypt_n_pad,@function
 .align 16
 xor128_encrypt_n_pad:
-.cfi_startproc
+.cfi_startproc
 subq %rdx,%rsi
 subq %rdx,%rdi
 movq %rcx,%r10
@@ -3527,14 +3527,14 @@ xor128_encrypt_n_pad:
 .Ldone_enc:
 movq %rdx,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
 .globl xor128_decrypt_n_pad
 .type xor128_decrypt_n_pad,@function
 .align 16
 xor128_decrypt_n_pad:
-.cfi_startproc
+.cfi_startproc
 subq %rdx,%rsi
 subq %rdx,%rdi
 movq %rcx,%r10
@@ -3580,5 +3580,5 @@ xor128_decrypt_n_pad:
 .Ldone_dec:
 movq %rdx,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/rc4/rc4-x86_64.s
index b97c757550aad0..d1d1eece70bf1e 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/rc4/rc4-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/rc4/rc4-x86_64.s
@@ -5,7 +5,7 @@
 .type RC4,@function
 .align 16
 RC4:
-.cfi_startproc
+.cfi_startproc
 orq %rsi,%rsi
 jne .Lentry
 .byte 0xf3,0xc3
@@ -534,7 +534,7 @@ RC4:
 .type RC4_set_key,@function
 .align 16
 RC4_set_key:
-.cfi_startproc
+.cfi_startproc
 leaq 8(%rdi),%rdi
 leaq (%rdx,%rsi,1),%rdx
 negq %rsi
@@ -601,14 +601,14 @@ RC4_set_key:
 movl %eax,-8(%rdi)
 movl %eax,-4(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size RC4_set_key,.-RC4_set_key
 .globl RC4_options
 .type RC4_options,@function
 .align 16
 RC4_options:
-.cfi_startproc
+.cfi_startproc
 leaq .Lopts(%rip),%rax
 movl OPENSSL_ia32cap_P(%rip),%edx
 btl $20,%edx
@@ -621,7 +621,7 @@ RC4_options:
 addq $12,%rax
 .Ldone:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .align 64
 .Lopts:
 .byte 114,99,52,40,56,120,44,105,110,116,41,0
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/sha/keccak1600-x86_64.s
index 09617d014bdb7b..11f26e933d80b5 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/sha/keccak1600-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/sha/keccak1600-x86_64.s
@@ -3,7 +3,7 @@
 .type __KeccakF1600,@function
 .align 32
 __KeccakF1600:
-.cfi_startproc
+.cfi_startproc
 movq 60(%rdi),%rax
 movq 68(%rdi),%rbx
 movq 76(%rdi),%rcx
@@ -256,7 +256,7 @@ __KeccakF1600:
 leaq -192(%r15),%r15
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __KeccakF1600,.-__KeccakF1600
 .type KeccakF1600,@function
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha1-x86_64.s
index 98541727e555da..d4efc7206f57b3 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha1-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha1-x86_64.s
@@ -1422,7 +1422,7 @@ _shaext_shortcut:
 movdqu %xmm0,(%rdi)
 movd %xmm1,16(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
 .type sha1_block_data_order_ssse3,@function
 .align 16
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha256-x86_64.s
index 9357385da3c49b..a7b60900fdd061 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha256-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/sha/sha256-x86_64.s
@@ -1775,7 +1775,7 @@ K256:
 .align 64
 sha256_block_data_order_shaext:
 _shaext_shortcut:
-.cfi_startproc
+.cfi_startproc
 leaq K256+128(%rip),%rcx
 movdqu (%rdi),%xmm1
 movdqu 16(%rdi),%xmm2
@@ -1978,7 +1978,7 @@ _shaext_shortcut:
 movdqu %xmm1,(%rdi)
 movdqu %xmm2,16(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
 .type sha256_block_data_order_ssse3,@function
 .align 64
diff --git a/deps/openssl/config/archs/linux-x32/asm/crypto/x86_64cpuid.s b/deps/openssl/config/archs/linux-x32/asm/crypto/x86_64cpuid.s
index 9268ce8c9a9d63..748e6d161fa24a 100644
--- a/deps/openssl/config/archs/linux-x32/asm/crypto/x86_64cpuid.s
+++ b/deps/openssl/config/archs/linux-x32/asm/crypto/x86_64cpuid.s
@@ -12,7 +12,7 @@
 .type OPENSSL_atomic_add,@function
 .align 16
 OPENSSL_atomic_add:
-.cfi_startproc
+.cfi_startproc
 movl (%rdi),%eax
 .Lspin: leaq (%rsi,%rax,1),%r8
 .byte 0xf0
@@ -21,19 +21,19 @@ OPENSSL_atomic_add:
 movl %r8d,%eax
 .byte 0x48,0x98
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_atomic_add,.-OPENSSL_atomic_add
 .globl OPENSSL_rdtsc
 .type OPENSSL_rdtsc,@function
 .align 16
 OPENSSL_rdtsc:
-.cfi_startproc
+.cfi_startproc
 rdtsc
 shlq $32,%rdx
 orq %rdx,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_rdtsc,.-OPENSSL_rdtsc
 .globl OPENSSL_ia32_cpuid
@@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid:
 .type OPENSSL_cleanse,@function
 .align 16
 OPENSSL_cleanse:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 cmpq $15,%rsi
 jae .Lot
@@ -239,14 +239,14 @@ OPENSSL_cleanse:
 cmpq $0,%rsi
 jne .Little
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_cleanse,.-OPENSSL_cleanse
 .globl CRYPTO_memcmp
 .type CRYPTO_memcmp,@function
 .align 16
 CRYPTO_memcmp:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 xorq %r10,%r10
 cmpq $0,%rdx
@@ -275,13 +275,13 @@ CRYPTO_memcmp:
 shrq $63,%rax
 .Lno_data:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size CRYPTO_memcmp,.-CRYPTO_memcmp
 .globl OPENSSL_wipe_cpu
 .type OPENSSL_wipe_cpu,@function
 .align 16
 OPENSSL_wipe_cpu:
-.cfi_startproc
+.cfi_startproc
 pxor %xmm0,%xmm0
 pxor %xmm1,%xmm1
 pxor %xmm2,%xmm2
@@ -308,13 +308,13 @@ OPENSSL_wipe_cpu:
 xorq %r11,%r11
 leaq 8(%rsp),%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu
 .globl OPENSSL_instrument_bus
 .type OPENSSL_instrument_bus,@function
 .align 16
 OPENSSL_instrument_bus:
-.cfi_startproc
+.cfi_startproc
 movq %rdi,%r10
 movq %rsi,%rcx
 movq %rsi,%r11
@@ -341,14 +341,14 @@ OPENSSL_instrument_bus:
 movq %r11,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus
 .globl OPENSSL_instrument_bus2
 .type OPENSSL_instrument_bus2,@function
 .align 16
 OPENSSL_instrument_bus2:
-.cfi_startproc
+.cfi_startproc
 movq %rdi,%r10
 movq %rsi,%rcx
 movq %rdx,%r11
@@ -391,13 +391,13 @@ OPENSSL_instrument_bus2:
 movq 8(%rsp),%rax
 subq %rcx,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2
 .globl OPENSSL_ia32_rdrand_bytes
 .type OPENSSL_ia32_rdrand_bytes,@function
 .align 16
 OPENSSL_ia32_rdrand_bytes:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 cmpq $0,%rsi
 je .Ldone_rdrand_bytes
@@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes:
 .Ldone_rdrand_bytes:
 xorq %r10,%r10
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes
 .globl OPENSSL_ia32_rdseed_bytes
 .type OPENSSL_ia32_rdseed_bytes,@function
 .align 16
 OPENSSL_ia32_rdseed_bytes:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 cmpq $0,%rsi
 je .Ldone_rdseed_bytes
@@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes:
 .Ldone_rdseed_bytes:
 xorq %r10,%r10
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-x32/asm_avx2/configdata.pm
index 756a0f44068b91..0052744c840be0 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/configdata.pm
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/configdata.pm
@@ -112,8 +112,8 @@ our %config = (
 sourcedir => ".",
 target => "linux-x32",
 tdirs => [ "ossl_shim" ],
- version => "1.1.1e",
- version_num => "0x1010105fL",
+ version => "1.1.1f",
+ version_num => "0x1010106fL",
 );
 our %target = (
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha1-x86_64.s
index 978bd2b6239c15..a38e21f0484e24 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha1-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha1-x86_64.s
@@ -5,7 +5,7 @@
 .type aesni_cbc_sha1_enc,@function
 .align 32
 aesni_cbc_sha1_enc:
-.cfi_startproc
+.cfi_startproc
 movl OPENSSL_ia32cap_P+0(%rip),%r10d
 movq OPENSSL_ia32cap_P+4(%rip),%r11
@@ -18,7 +18,7 @@ aesni_cbc_sha1_enc:
 je aesni_cbc_sha1_enc_avx
 jmp aesni_cbc_sha1_enc_ssse3
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
 .type aesni_cbc_sha1_enc_ssse3,@function
 .align 32
@@ -2732,7 +2732,7 @@ K_XX_XX:
 .type aesni_cbc_sha1_enc_shaext,@function
 .align 32
 aesni_cbc_sha1_enc_shaext:
-.cfi_startproc
+.cfi_startproc
 movq 8(%rsp),%r10
 movdqu (%r9),%xmm8
 movd 16(%r9),%xmm9
@@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext:
 movdqu %xmm8,(%r9)
 movd %xmm9,16(%r9)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha256-x86_64.s
index dd09f1b290af62..3e56a82578a354 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha256-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-sha256-x86_64.s
@@ -5,7 +5,7 @@
 .type aesni_cbc_sha256_enc,@function
 .align 16
 aesni_cbc_sha256_enc:
-.cfi_startproc
+.cfi_startproc
 leaq OPENSSL_ia32cap_P(%rip),%r11
 movl $1,%eax
 cmpq $0,%rdi
@@ -31,7 +31,7 @@ aesni_cbc_sha256_enc:
 ud2
 .Lprobe:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc
 .align 64
@@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2:
 .type aesni_cbc_sha256_enc_shaext,@function
 .align 32
 aesni_cbc_sha256_enc_shaext:
-.cfi_startproc
+.cfi_startproc
 movq 8(%rsp),%r10
 leaq K256+128(%rip),%rax
 movdqu (%r9),%xmm1
@@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext:
 movdqu %xmm1,(%r9)
 movdqu %xmm2,16(%r9)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-x86_64.s
index c1e791eff59235..1a4b22e7b8a5d9 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/aes/aesni-x86_64.s
@@ -861,7 +861,7 @@ aesni_ecb_encrypt:
 .type aesni_ccm64_encrypt_blocks,@function
 .align 16
 aesni_ccm64_encrypt_blocks:
-.cfi_startproc
+.cfi_startproc
 movl 240(%rcx),%eax
 movdqu (%r8),%xmm6
 movdqa .Lincrement64(%rip),%xmm9
@@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks:
 pxor %xmm8,%xmm8
 pxor %xmm6,%xmm6
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
 .globl aesni_ccm64_decrypt_blocks
 .type aesni_ccm64_decrypt_blocks,@function
 .align 16
 aesni_ccm64_decrypt_blocks:
-.cfi_startproc
+.cfi_startproc
 movl 240(%rcx),%eax
 movups (%r8),%xmm6
 movdqu (%r9),%xmm3
@@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks:
 pxor %xmm8,%xmm8
 pxor %xmm6,%xmm6
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
 .globl aesni_ctr32_encrypt_blocks
 .type aesni_ctr32_encrypt_blocks,@function
@@ -2794,7 +2794,7 @@ aesni_ocb_encrypt:
 .type __ocb_encrypt6,@function
 .align 32
 __ocb_encrypt6:
-.cfi_startproc
+.cfi_startproc
 pxor %xmm9,%xmm15
 movdqu (%rbx,%r12,1),%xmm11
 movdqa %xmm10,%xmm12
@@ -2892,13 +2892,13 @@ __ocb_encrypt6:
 .byte 102,65,15,56,221,246
 .byte 102,65,15,56,221,255
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __ocb_encrypt6,.-__ocb_encrypt6
 .type __ocb_encrypt4,@function
 .align 32
 __ocb_encrypt4:
-.cfi_startproc
+.cfi_startproc
 pxor %xmm9,%xmm15
 movdqu (%rbx,%r12,1),%xmm11
 movdqa %xmm10,%xmm12
@@ -2963,13 +2963,13 @@ __ocb_encrypt4:
 .byte 102,65,15,56,221,228
 .byte 102,65,15,56,221,237
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __ocb_encrypt4,.-__ocb_encrypt4
 .type __ocb_encrypt1,@function
 .align 32
 __ocb_encrypt1:
-.cfi_startproc
+.cfi_startproc
 pxor %xmm15,%xmm7
 pxor %xmm9,%xmm7
 pxor %xmm2,%xmm8
@@ -3000,7 +3000,7 @@ __ocb_encrypt1:
 .byte 102,15,56,221,215
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __ocb_encrypt1,.-__ocb_encrypt1
 .globl aesni_ocb_decrypt
@@ -3243,7 +3243,7 @@ aesni_ocb_decrypt:
 .type __ocb_decrypt6,@function
 .align 32
 __ocb_decrypt6:
-.cfi_startproc
+.cfi_startproc
 pxor %xmm9,%xmm15
 movdqu (%rbx,%r12,1),%xmm11
 movdqa %xmm10,%xmm12
@@ -3335,13 +3335,13 @@ __ocb_decrypt6:
 .byte 102,65,15,56,223,246
 .byte 102,65,15,56,223,255
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __ocb_decrypt6,.-__ocb_decrypt6
 .type __ocb_decrypt4,@function
 .align 32
 __ocb_decrypt4:
-.cfi_startproc
+.cfi_startproc
 pxor %xmm9,%xmm15
 movdqu (%rbx,%r12,1),%xmm11
 movdqa %xmm10,%xmm12
@@ -3402,13 +3402,13 @@ __ocb_decrypt4:
 .byte 102,65,15,56,223,228
 .byte 102,65,15,56,223,237
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __ocb_decrypt4,.-__ocb_decrypt4
 .type __ocb_decrypt1,@function
 .align 32
 __ocb_decrypt1:
-.cfi_startproc
+.cfi_startproc
 pxor %xmm15,%xmm7
 pxor %xmm9,%xmm7
 pxor %xmm7,%xmm2
@@ -3438,7 +3438,7 @@ __ocb_decrypt1:
 .byte 102,15,56,223,215
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __ocb_decrypt1,.-__ocb_decrypt1
 .globl aesni_cbc_encrypt
 .type aesni_cbc_encrypt,@function
@@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key:
 shufps $170,%xmm1,%xmm1
 xorps %xmm1,%xmm2
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size aesni_set_encrypt_key,.-aesni_set_encrypt_key
 .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
 .align 64
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/rsaz-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/rsaz-x86_64.s
index 7876e0b8f93d9c..d5025b23cd668e 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/rsaz-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/rsaz-x86_64.s
@@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one:
 .type __rsaz_512_reduce,@function
 .align 32
 __rsaz_512_reduce:
-.cfi_startproc
+.cfi_startproc
 movq %r8,%rbx
 imulq 128+8(%rsp),%rbx
 movq 0(%rbp),%rax
@@ -1533,12 +1533,12 @@ __rsaz_512_reduce:
 jne .Lreduction_loop
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_reduce,.-__rsaz_512_reduce
 .type __rsaz_512_reducex,@function
 .align 32
 __rsaz_512_reducex:
-.cfi_startproc
+.cfi_startproc
 imulq %r8,%rdx
 xorq %rsi,%rsi
@@ -1591,12 +1591,12 @@ __rsaz_512_reducex:
 jne .Lreduction_loopx
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_reducex,.-__rsaz_512_reducex
 .type __rsaz_512_subtract,@function
 .align 32
 __rsaz_512_subtract:
-.cfi_startproc
+.cfi_startproc
 movq %r8,(%rdi)
 movq %r9,8(%rdi)
 movq %r10,16(%rdi)
@@ -1650,12 +1650,12 @@ __rsaz_512_subtract:
 movq %r15,56(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_subtract,.-__rsaz_512_subtract
 .type __rsaz_512_mul,@function
 .align 32
 __rsaz_512_mul:
-.cfi_startproc
+.cfi_startproc
 leaq 8(%rsp),%rdi
 movq (%rsi),%rax
@@ -1794,12 +1794,12 @@ __rsaz_512_mul:
 movq %r15,56(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_mul,.-__rsaz_512_mul
 .type __rsaz_512_mulx,@function
 .align 32
 __rsaz_512_mulx:
-.cfi_startproc
+.cfi_startproc
 mulxq (%rsi),%rbx,%r8
 movq $-6,%rcx
@@ -1916,13 +1916,13 @@ __rsaz_512_mulx:
 movq %r15,8+64+56(%rsp)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __rsaz_512_mulx,.-__rsaz_512_mulx
 .globl rsaz_512_scatter4
 .type rsaz_512_scatter4,@function
 .align 16
 rsaz_512_scatter4:
-.cfi_startproc
+.cfi_startproc
 leaq (%rdi,%rdx,8),%rdi
 movl $8,%r9d
 jmp .Loop_scatter
@@ -1935,14 +1935,14 @@ rsaz_512_scatter4:
 decl %r9d
 jnz .Loop_scatter
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size rsaz_512_scatter4,.-rsaz_512_scatter4
 .globl rsaz_512_gather4
 .type rsaz_512_gather4,@function
 .align 16
 rsaz_512_gather4:
-.cfi_startproc
+.cfi_startproc
 movd %edx,%xmm8
 movdqa .Linc+16(%rip),%xmm1
 movdqa .Linc(%rip),%xmm0
@@ -2006,7 +2006,7 @@ rsaz_512_gather4:
 jnz .Loop_gather
 .byte 0xf3,0xc3
 .LSEH_end_rsaz_512_gather4:
-.cfi_endproc
+.cfi_endproc
 .size rsaz_512_gather4,.-rsaz_512_gather4
 .align 64
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/x86_64-mont5.s
index 40a60a3c8fc6b9..ab93b02d8c1aae 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/x86_64-mont5.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/bn/x86_64-mont5.s
@@ -550,7 +550,7 @@ bn_mul4x_mont_gather5:
 .type mul4x_internal,@function
 .align 32
 mul4x_internal:
-.cfi_startproc
+.cfi_startproc
 shlq $5,%r9
 movd 8(%rax),%xmm5
 leaq .Linc(%rip),%rax
@@ -1072,7 +1072,7 @@ mul4x_internal:
 movq 16(%rbp),%r14
 movq 24(%rbp),%r15
 jmp .Lsqr4x_sub_entry
-.cfi_endproc
+.cfi_endproc
 .size mul4x_internal,.-mul4x_internal
 .globl bn_power5
 .type bn_power5,@function
@@ -1215,7 +1215,7 @@ bn_power5:
 .align 32
 bn_sqr8x_internal:
 __bn_sqr8x_internal:
-.cfi_startproc
+.cfi_startproc
@@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction:
 cmpq %rdx,%rdi
 jb .L8x_reduction_loop
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_sqr8x_internal,.-bn_sqr8x_internal
 .type __bn_post4x_internal,@function
 .align 32
 __bn_post4x_internal:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rbp),%r12
 leaq (%rdi,%r9,1),%rbx
 movq %r9,%rcx
@@ -2046,18 +2046,18 @@ __bn_post4x_internal:
 movq %r9,%r10
 negq %r9
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __bn_post4x_internal,.-__bn_post4x_internal
 .globl bn_from_montgomery
 .type bn_from_montgomery,@function
 .align 32
 bn_from_montgomery:
-.cfi_startproc
+.cfi_startproc
 testl $7,%r9d
 jz bn_from_mont8x
 xorl %eax,%eax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_from_montgomery,.-bn_from_montgomery
 .type bn_from_mont8x,@function
@@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5:
 .type mulx4x_internal,@function
 .align 32
 mulx4x_internal:
-.cfi_startproc
+.cfi_startproc
 movq %r9,8(%rsp)
 movq %r9,%r10
 negq %r9
@@ -2760,7 +2760,7 @@ mulx4x_internal:
 movq 16(%rbp),%r14
 movq 24(%rbp),%r15
 jmp .Lsqrx4x_sub_entry
-.cfi_endproc
+.cfi_endproc
 .size mulx4x_internal,.-mulx4x_internal
 .type bn_powerx5,@function
 .align 32
@@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction:
 .size bn_sqrx8x_internal,.-bn_sqrx8x_internal
 .align 32
 __bn_postx4x_internal:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rbp),%r12
 movq %rcx,%r10
 movq %rcx,%r9
@@ -3567,13 +3567,13 @@ __bn_postx4x_internal:
 negq %r9
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __bn_postx4x_internal,.-__bn_postx4x_internal
 .globl bn_get_bits5
 .type bn_get_bits5,@function
 .align 16
 bn_get_bits5:
-.cfi_startproc
+.cfi_startproc
 leaq 0(%rdi),%r10
 leaq 1(%rdi),%r11
 movl %esi,%ecx
@@ -3587,14 +3587,14 @@ bn_get_bits5:
 shrl %cl,%eax
 andl $31,%eax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_get_bits5,.-bn_get_bits5
 .globl bn_scatter5
 .type bn_scatter5,@function
 .align 16
 bn_scatter5:
-.cfi_startproc
+.cfi_startproc
 cmpl $0,%esi
 jz .Lscatter_epilogue
 leaq (%rdx,%rcx,8),%rdx
@@ -3607,7 +3607,7 @@ bn_scatter5:
 jnz .Lscatter
 .Lscatter_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size bn_scatter5,.-bn_scatter5
 .globl bn_gather5
@@ -3615,7 +3615,7 @@ bn_scatter5:
 .align 32
 bn_gather5:
 .LSEH_begin_bn_gather5:
-.cfi_startproc
+.cfi_startproc
 .byte 0x4c,0x8d,0x14,0x24
 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00
@@ -3773,7 +3773,7 @@ bn_gather5:
 leaq (%r10),%rsp
 .byte 0xf3,0xc3
 .LSEH_end_bn_gather5:
-.cfi_endproc
+.cfi_endproc
 .size bn_gather5,.-bn_gather5
 .align 64
 .Linc:
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/buildinf.h
index 3814cc7977b026..0135fe7f77b45c 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/buildinf.h
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/buildinf.h
@@ -11,7 +11,7 @@
 */
 #define PLATFORM "platform: linux-x32"
-#define DATE "built on: Mon Mar 23 17:53:45 2020 UTC"
+#define DATE "built on: Wed Apr 1 16:07:39 2020 UTC"
 /*
 * Generate compiler_flags as an array of individual characters. This is a
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/camellia/cmll-x86_64.s
index eeb20dd2291da7..92056f8b1e0e0e 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/camellia/cmll-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/camellia/cmll-x86_64.s
@@ -5,13 +5,13 @@
 .type Camellia_EncryptBlock,@function
 .align 16
 Camellia_EncryptBlock:
-.cfi_startproc
+.cfi_startproc
 movl $128,%eax
 subl %edi,%eax
 movl $3,%edi
 adcl $0,%edi
 jmp .Lenc_rounds
-.cfi_endproc
+.cfi_endproc
 .size Camellia_EncryptBlock,.-Camellia_EncryptBlock
 .globl Camellia_EncryptBlock_Rounds
@@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds:
 .type _x86_64_Camellia_encrypt,@function
 .align 16
 _x86_64_Camellia_encrypt:
-.cfi_startproc
+.cfi_startproc
 xorl 0(%r14),%r9d
 xorl 4(%r14),%r8d
 xorl 8(%r14),%r11d
@@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt:
 movl %edx,%r11d
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt
@@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt:
 .type Camellia_DecryptBlock,@function
 .align 16
 Camellia_DecryptBlock:
-.cfi_startproc
+.cfi_startproc
 movl $128,%eax
 subl %edi,%eax
 movl $3,%edi
 adcl $0,%edi
 jmp .Ldec_rounds
-.cfi_endproc
+.cfi_endproc
 .size Camellia_DecryptBlock,.-Camellia_DecryptBlock
 .globl Camellia_DecryptBlock_Rounds
@@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds:
 .type _x86_64_Camellia_decrypt,@function
 .align 16
 _x86_64_Camellia_decrypt:
-.cfi_startproc
+.cfi_startproc
 xorl 0(%r14),%r9d
 xorl 4(%r14),%r8d
 xorl 8(%r14),%r11d
@@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt:
 movl %ebx,%r11d
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt
 .globl Camellia_Ekeygen
 .type Camellia_Ekeygen,@function
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s
index 5c9e4050416212..80569cae04667e 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s
@@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx:
 .type ecp_nistz256_to_mont,@function
 .align 32
 ecp_nistz256_to_mont:
-.cfi_startproc
+.cfi_startproc
 movl $0x80100,%ecx
 andl OPENSSL_ia32cap_P+8(%rip),%ecx
 leaq .LRR(%rip),%rdx
 jmp .Lmul_mont
-.cfi_endproc
+.cfi_endproc
 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
@@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont:
 .type ecp_nistz256_scatter_w5,@function
 .align 32
 ecp_nistz256_scatter_w5:
-.cfi_startproc
+.cfi_startproc
 leal -3(%rdx,%rdx,2),%edx
 movdqa 0(%rsi),%xmm0
 shll $5,%edx
@@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5:
 movdqa %xmm5,80(%rdi,%rdx,1)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
@@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5:
 .type ecp_nistz256_scatter_w7,@function
 .align 32
 ecp_nistz256_scatter_w7:
-.cfi_startproc
+.cfi_startproc
 movdqu 0(%rsi),%xmm0
 shll $6,%edx
 movdqu 16(%rsi),%xmm1
@@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7:
 movdqa %xmm3,48(%rdi,%rdx,1)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/x25519-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/x25519-x86_64.s
index 1788e568cda5d2..8fd319c83c880d 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/x25519-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/ec/x25519-x86_64.s
@@ -400,14 +400,14 @@ x25519_fe51_mul121666:
 .type x25519_fe64_eligible,@function
 .align 32
 x25519_fe64_eligible:
-.cfi_startproc
+.cfi_startproc
 movl OPENSSL_ia32cap_P+8(%rip),%ecx
 xorl %eax,%eax
 andl $0x80100,%ecx
 cmpl $0x80100,%ecx
 cmovel %ecx,%eax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_eligible,.-x25519_fe64_eligible
 .globl x25519_fe64_mul
@@ -650,7 +650,7 @@ x25519_fe64_sqr:
 .align 32
 x25519_fe64_mul121666:
 .Lfe64_mul121666_body:
-.cfi_startproc
+.cfi_startproc
 movl $121666,%edx
 mulxq 0(%rsi),%r8,%rcx
 mulxq 8(%rsi),%r9,%rax
@@ -679,7 +679,7 @@ x25519_fe64_mul121666:
 .Lfe64_mul121666_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_mul121666,.-x25519_fe64_mul121666
 .globl x25519_fe64_add
@@ -687,7 +687,7 @@ x25519_fe64_mul121666:
 .align 32
 x25519_fe64_add:
 .Lfe64_add_body:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rsi),%r8
 movq 8(%rsi),%r9
 movq 16(%rsi),%r10
@@ -716,7 +716,7 @@ x25519_fe64_add:
 .Lfe64_add_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_add,.-x25519_fe64_add
 .globl x25519_fe64_sub
@@ -724,7 +724,7 @@ x25519_fe64_add:
 .align 32
 x25519_fe64_sub:
 .Lfe64_sub_body:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rsi),%r8
 movq 8(%rsi),%r9
 movq 16(%rsi),%r10
@@ -753,7 +753,7 @@ x25519_fe64_sub:
 .Lfe64_sub_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_sub,.-x25519_fe64_sub
 .globl x25519_fe64_tobytes
@@ -761,7 +761,7 @@ x25519_fe64_sub:
 .align 32
 x25519_fe64_tobytes:
 .Lfe64_to_body:
-.cfi_startproc
+.cfi_startproc
 movq 0(%rsi),%r8
 movq 8(%rsi),%r9
 movq 16(%rsi),%r10
@@ -797,6 +797,6 @@ x25519_fe64_tobytes:
 .Lfe64_to_epilogue:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size x25519_fe64_tobytes,.-x25519_fe64_tobytes
 .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/modes/aesni-gcm-x86_64.s
index 01d89630a42f73..bf508aff6ff6ec 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/modes/aesni-gcm-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/modes/aesni-gcm-x86_64.s
@@ -3,7 +3,7 @@
 .type _aesni_ctr32_ghash_6x,@function
 .align 32
 _aesni_ctr32_ghash_6x:
-.cfi_startproc
+.cfi_startproc
 vmovdqu 32(%r11),%xmm2
 subq $6,%rdx
 vpxor %xmm4,%xmm4,%xmm4
@@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x:
 vpxor %xmm4,%xmm8,%xmm8
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
 .globl aesni_gcm_decrypt
 .type aesni_gcm_decrypt,@function
@@ -418,7 +418,7 @@ aesni_gcm_decrypt:
 .type _aesni_ctr32_6x,@function
 .align 32
 _aesni_ctr32_6x:
-.cfi_startproc
+.cfi_startproc
 vmovdqu 0-128(%rcx),%xmm4
 vmovdqu 32(%r11),%xmm2
 leaq -1(%rbp),%r13
@@ -505,7 +505,7 @@ _aesni_ctr32_6x:
 vpshufb %xmm0,%xmm1,%xmm1
 vpxor %xmm4,%xmm14,%xmm14
 jmp .Loop_ctr32
-.cfi_endproc
+.cfi_endproc
 .size _aesni_ctr32_6x,.-_aesni_ctr32_6x
 .globl aesni_gcm_encrypt
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/poly1305/poly1305-x86_64.s
index 505c06471d45b0..34460e7bea6e16 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/poly1305/poly1305-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/poly1305/poly1305-x86_64.s
@@ -12,7 +12,7 @@
 .type poly1305_init,@function
 .align 32
 poly1305_init:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 movq %rax,0(%rdi)
 movq %rax,8(%rdi)
@@ -43,7 +43,7 @@ poly1305_init:
 movl $1,%eax
 .Lno_key:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_init,.-poly1305_init
 .type poly1305_blocks,@function
@@ -164,7 +164,7 @@ poly1305_blocks:
 .type poly1305_emit,@function
 .align 32
 poly1305_emit:
-.cfi_startproc
+.cfi_startproc
 .Lemit:
 movq 0(%rdi),%r8
 movq 8(%rdi),%r9
@@ -185,12 +185,12 @@ poly1305_emit:
 movq %rcx,8(%rsi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_emit,.-poly1305_emit
 .type __poly1305_block,@function
 .align 32
 __poly1305_block:
-.cfi_startproc
+.cfi_startproc
 mulq %r14
 movq %rax,%r9
 movq %r11,%rax
@@ -230,13 +230,13 @@ __poly1305_block:
 adcq $0,%rbx
 adcq $0,%rbp
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __poly1305_block,.-__poly1305_block
 .type __poly1305_init_avx,@function
 .align 32
 __poly1305_init_avx:
-.cfi_startproc
+.cfi_startproc
 movq %r11,%r14
 movq %r12,%rbx
 xorq %rbp,%rbp
@@ -394,7 +394,7 @@ __poly1305_init_avx:
 leaq -48-64(%rdi),%rdi
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __poly1305_init_avx,.-__poly1305_init_avx
 .type poly1305_blocks_avx,@function
@@ -1235,7 +1235,7 @@ poly1305_blocks_avx:
 .type poly1305_emit_avx,@function
 .align 32
 poly1305_emit_avx:
-.cfi_startproc
+.cfi_startproc
 cmpl $0,20(%rdi)
 je .Lemit
@@ -1286,7 +1286,7 @@ poly1305_emit_avx:
 movq %rcx,8(%rsi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size poly1305_emit_avx,.-poly1305_emit_avx
 .type poly1305_blocks_avx2,@function
 .align 32
@@ -1969,7 +1969,7 @@ poly1305_blocks_avx2:
 .type xor128_encrypt_n_pad,@function
 .align 16
 xor128_encrypt_n_pad:
-.cfi_startproc
+.cfi_startproc
 subq %rdx,%rsi
 subq %rdx,%rdi
 movq %rcx,%r10
@@ -2011,14 +2011,14 @@ xor128_encrypt_n_pad:
 .Ldone_enc:
 movq %rdx,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
 .globl xor128_decrypt_n_pad
 .type xor128_decrypt_n_pad,@function
 .align 16
 xor128_decrypt_n_pad:
-.cfi_startproc
+.cfi_startproc
 subq %rdx,%rsi
 subq %rdx,%rdi
 movq %rcx,%r10
@@ -2064,5 +2064,5 @@ xor128_decrypt_n_pad:
 .Ldone_dec:
 movq %rdx,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/rc4/rc4-x86_64.s
index b97c757550aad0..d1d1eece70bf1e 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/rc4/rc4-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/rc4/rc4-x86_64.s
@@ -5,7 +5,7 @@
 .type RC4,@function
 .align 16
 RC4:
-.cfi_startproc
+.cfi_startproc
 orq %rsi,%rsi
 jne .Lentry
 .byte 0xf3,0xc3
@@ -534,7 +534,7 @@ RC4:
 .type RC4_set_key,@function
 .align 16
 RC4_set_key:
-.cfi_startproc
+.cfi_startproc
 leaq 8(%rdi),%rdi
 leaq (%rdx,%rsi,1),%rdx
 negq %rsi
@@ -601,14 +601,14 @@ RC4_set_key:
 movl %eax,-8(%rdi)
 movl %eax,-4(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size RC4_set_key,.-RC4_set_key
 .globl RC4_options
 .type RC4_options,@function
 .align 16
 RC4_options:
-.cfi_startproc
+.cfi_startproc
 leaq .Lopts(%rip),%rax
 movl OPENSSL_ia32cap_P(%rip),%edx
 btl $20,%edx
@@ -621,7 +621,7 @@ RC4_options:
 addq $12,%rax
 .Ldone:
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .align 64
 .Lopts:
 .byte 114,99,52,40,56,120,44,105,110,116,41,0
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/keccak1600-x86_64.s
index 09617d014bdb7b..11f26e933d80b5 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/keccak1600-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/keccak1600-x86_64.s
@@ -3,7 +3,7 @@
 .type __KeccakF1600,@function
 .align 32
 __KeccakF1600:
-.cfi_startproc
+.cfi_startproc
 movq 60(%rdi),%rax
 movq 68(%rdi),%rbx
 movq 76(%rdi),%rcx
@@ -256,7 +256,7 @@ __KeccakF1600:
 leaq -192(%r15),%r15
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size __KeccakF1600,.-__KeccakF1600
 .type KeccakF1600,@function
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha1-x86_64.s
index 98541727e555da..d4efc7206f57b3 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha1-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha1-x86_64.s
@@ -1422,7 +1422,7 @@ _shaext_shortcut:
 movdqu %xmm0,(%rdi)
 movd %xmm1,16(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
 .type sha1_block_data_order_ssse3,@function
 .align 16
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha256-x86_64.s
index 9357385da3c49b..a7b60900fdd061 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha256-x86_64.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/sha/sha256-x86_64.s
@@ -1775,7 +1775,7 @@ K256:
 .align 64
 sha256_block_data_order_shaext:
 _shaext_shortcut:
-.cfi_startproc
+.cfi_startproc
 leaq K256+128(%rip),%rcx
 movdqu (%rdi),%xmm1
 movdqu 16(%rdi),%xmm2
@@ -1978,7 +1978,7 @@ _shaext_shortcut:
 movdqu %xmm1,(%rdi)
 movdqu %xmm2,16(%rdi)
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
 .type sha256_block_data_order_ssse3,@function
 .align 64
diff --git a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/x86_64cpuid.s b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/x86_64cpuid.s
index 9268ce8c9a9d63..748e6d161fa24a 100644
--- a/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/x86_64cpuid.s
+++ b/deps/openssl/config/archs/linux-x32/asm_avx2/crypto/x86_64cpuid.s
@@ -12,7 +12,7 @@
 .type OPENSSL_atomic_add,@function
 .align 16
 OPENSSL_atomic_add:
-.cfi_startproc
+.cfi_startproc
 movl (%rdi),%eax
 .Lspin: leaq (%rsi,%rax,1),%r8
 .byte 0xf0
@@ -21,19 +21,19 @@ OPENSSL_atomic_add:
 movl %r8d,%eax
 .byte 0x48,0x98
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_atomic_add,.-OPENSSL_atomic_add
 .globl OPENSSL_rdtsc
 .type OPENSSL_rdtsc,@function
 .align 16
 OPENSSL_rdtsc:
-.cfi_startproc
+.cfi_startproc
 rdtsc
 shlq $32,%rdx
 orq %rdx,%rax
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_rdtsc,.-OPENSSL_rdtsc
 .globl OPENSSL_ia32_cpuid
@@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid:
 .type OPENSSL_cleanse,@function
 .align 16
 OPENSSL_cleanse:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 cmpq $15,%rsi
 jae .Lot
@@ -239,14 +239,14 @@ OPENSSL_cleanse:
 cmpq $0,%rsi
 jne .Little
 .byte 0xf3,0xc3
-.cfi_endproc
+.cfi_endproc
 .size OPENSSL_cleanse,.-OPENSSL_cleanse
 .globl CRYPTO_memcmp
 .type CRYPTO_memcmp,@function
 .align 16
 CRYPTO_memcmp:
-.cfi_startproc
+.cfi_startproc
 xorq %rax,%rax
 xorq %r10,%r10
 cmpq $0,%rdx
@@ -275,13 +275,13 @@ CRYPTO_memcmp:
shrq $63,%rax .Lno_data: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size CRYPTO_memcmp,.-CRYPTO_memcmp .globl OPENSSL_wipe_cpu .type OPENSSL_wipe_cpu,@function .align 16 OPENSSL_wipe_cpu: -.cfi_startproc +.cfi_startproc pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 @@ -308,13 +308,13 @@ OPENSSL_wipe_cpu: xorq %r11,%r11 leaq 8(%rsp),%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu .globl OPENSSL_instrument_bus .type OPENSSL_instrument_bus,@function .align 16 OPENSSL_instrument_bus: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rsi,%r11 @@ -341,14 +341,14 @@ OPENSSL_instrument_bus: movq %r11,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus .globl OPENSSL_instrument_bus2 .type OPENSSL_instrument_bus2,@function .align 16 OPENSSL_instrument_bus2: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rdx,%r11 @@ -391,13 +391,13 @@ OPENSSL_instrument_bus2: movq 8(%rsp),%rax subq %rcx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2 .globl OPENSSL_ia32_rdrand_bytes .type OPENSSL_ia32_rdrand_bytes,@function .align 16 OPENSSL_ia32_rdrand_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdrand_bytes @@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes: .Ldone_rdrand_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes .globl OPENSSL_ia32_rdseed_bytes .type OPENSSL_ia32_rdseed_bytes,@function .align 16 OPENSSL_ia32_rdseed_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdseed_bytes @@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes: .Ldone_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes diff --git a/deps/openssl/config/archs/linux-x32/no-asm/configdata.pm b/deps/openssl/config/archs/linux-x32/no-asm/configdata.pm index 5c10aa824deb4e..1c260c7710aedc 100644 --- a/deps/openssl/config/archs/linux-x32/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-x32/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-x32", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-x32/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-x32/no-asm/crypto/buildinf.h index d07ccb0fa228f9..5ec94fa2000240 100644 --- a/deps/openssl/config/archs/linux-x32/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-x32/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-x32" -#define DATE "built on: Mon Mar 23 17:54:09 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:48 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux-x86_64/asm/configdata.pm b/deps/openssl/config/archs/linux-x86_64/asm/configdata.pm index 79aabd9d26e5ef..77027e45f9c39d 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/configdata.pm +++ b/deps/openssl/config/archs/linux-x86_64/asm/configdata.pm @@ -112,8 +112,8 @@ our %config = ( sourcedir => ".", target => "linux-x86_64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s index 978bd2b6239c15..a38e21f0484e24 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha1-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha1_enc,@function .align 32 aesni_cbc_sha1_enc: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+0(%rip),%r10d movq OPENSSL_ia32cap_P+4(%rip),%r11 @@ -18,7 +18,7 @@ aesni_cbc_sha1_enc: je aesni_cbc_sha1_enc_avx jmp aesni_cbc_sha1_enc_ssse3 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc .type aesni_cbc_sha1_enc_ssse3,@function .align 32 @@ -2732,7 +2732,7 @@ K_XX_XX: .type aesni_cbc_sha1_enc_shaext,@function .align 32 aesni_cbc_sha1_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 movdqu (%r9),%xmm8 movd 16(%r9),%xmm9 @@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext: movdqu %xmm8,(%r9) movd %xmm9,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s index dd09f1b290af62..3e56a82578a354 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-sha256-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha256_enc,@function .align 16 aesni_cbc_sha256_enc: -.cfi_startproc +.cfi_startproc leaq OPENSSL_ia32cap_P(%rip),%r11 movl $1,%eax cmpq $0,%rdi @@ -31,7 +31,7 @@ aesni_cbc_sha256_enc: ud2 .Lprobe: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc .align 64 @@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2: .type aesni_cbc_sha256_enc_shaext,@function .align 32 aesni_cbc_sha256_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 leaq K256+128(%rip),%rax movdqu (%r9),%xmm1 @@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext: movdqu %xmm1,(%r9) movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-x86_64.s index c1e791eff59235..1a4b22e7b8a5d9 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/aes/aesni-x86_64.s @@ -861,7 +861,7 @@ aesni_ecb_encrypt: .type aesni_ccm64_encrypt_blocks,@function .align 16 aesni_ccm64_encrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movdqu (%r8),%xmm6 movdqa .Lincrement64(%rip),%xmm9 @@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc 
.size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks .globl aesni_ccm64_decrypt_blocks .type aesni_ccm64_decrypt_blocks,@function .align 16 aesni_ccm64_decrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movups (%r8),%xmm6 movdqu (%r9),%xmm3 @@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks .globl aesni_ctr32_encrypt_blocks .type aesni_ctr32_encrypt_blocks,@function @@ -2794,7 +2794,7 @@ aesni_ocb_encrypt: .type __ocb_encrypt6,@function .align 32 __ocb_encrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2892,13 +2892,13 @@ __ocb_encrypt6: .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt6,.-__ocb_encrypt6 .type __ocb_encrypt4,@function .align 32 __ocb_encrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2963,13 +2963,13 @@ __ocb_encrypt4: .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt4,.-__ocb_encrypt4 .type __ocb_encrypt1,@function .align 32 __ocb_encrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm2,%xmm8 @@ -3000,7 +3000,7 @@ __ocb_encrypt1: .byte 102,15,56,221,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt1,.-__ocb_encrypt1 .globl aesni_ocb_decrypt @@ -3243,7 +3243,7 @@ aesni_ocb_decrypt: .type __ocb_decrypt6,@function .align 32 __ocb_decrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3335,13 +3335,13 @@ __ocb_decrypt6: .byte 102,65,15,56,223,246 .byte 102,65,15,56,223,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt6,.-__ocb_decrypt6 .type __ocb_decrypt4,@function .align 32 __ocb_decrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3402,13 +3402,13 @@ __ocb_decrypt4: .byte 102,65,15,56,223,228 .byte 102,65,15,56,223,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt4,.-__ocb_decrypt4 .type __ocb_decrypt1,@function .align 32 __ocb_decrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm7,%xmm2 @@ -3438,7 +3438,7 @@ __ocb_decrypt1: .byte 102,15,56,223,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt1,.-__ocb_decrypt1 .globl aesni_cbc_encrypt .type aesni_cbc_encrypt,@function @@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_set_encrypt_key,.-aesni_set_encrypt_key .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key .align 64 diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/rsaz-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/rsaz-x86_64.s index 7876e0b8f93d9c..d5025b23cd668e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/rsaz-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/rsaz-x86_64.s @@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one: .type __rsaz_512_reduce,@function .align 32 __rsaz_512_reduce: -.cfi_startproc +.cfi_startproc movq %r8,%rbx imulq 128+8(%rsp),%rbx movq 0(%rbp),%rax @@ -1533,12 +1533,12 @@ __rsaz_512_reduce: jne .Lreduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reduce,.-__rsaz_512_reduce .type 
__rsaz_512_reducex,@function .align 32 __rsaz_512_reducex: -.cfi_startproc +.cfi_startproc imulq %r8,%rdx xorq %rsi,%rsi @@ -1591,12 +1591,12 @@ __rsaz_512_reducex: jne .Lreduction_loopx .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reducex,.-__rsaz_512_reducex .type __rsaz_512_subtract,@function .align 32 __rsaz_512_subtract: -.cfi_startproc +.cfi_startproc movq %r8,(%rdi) movq %r9,8(%rdi) movq %r10,16(%rdi) @@ -1650,12 +1650,12 @@ __rsaz_512_subtract: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_subtract,.-__rsaz_512_subtract .type __rsaz_512_mul,@function .align 32 __rsaz_512_mul: -.cfi_startproc +.cfi_startproc leaq 8(%rsp),%rdi movq (%rsi),%rax @@ -1794,12 +1794,12 @@ __rsaz_512_mul: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mul,.-__rsaz_512_mul .type __rsaz_512_mulx,@function .align 32 __rsaz_512_mulx: -.cfi_startproc +.cfi_startproc mulxq (%rsi),%rbx,%r8 movq $-6,%rcx @@ -1916,13 +1916,13 @@ __rsaz_512_mulx: movq %r15,8+64+56(%rsp) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mulx,.-__rsaz_512_mulx .globl rsaz_512_scatter4 .type rsaz_512_scatter4,@function .align 16 rsaz_512_scatter4: -.cfi_startproc +.cfi_startproc leaq (%rdi,%rdx,8),%rdi movl $8,%r9d jmp .Loop_scatter @@ -1935,14 +1935,14 @@ rsaz_512_scatter4: decl %r9d jnz .Loop_scatter .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size rsaz_512_scatter4,.-rsaz_512_scatter4 .globl rsaz_512_gather4 .type rsaz_512_gather4,@function .align 16 rsaz_512_gather4: -.cfi_startproc +.cfi_startproc movd %edx,%xmm8 movdqa .Linc+16(%rip),%xmm1 movdqa .Linc(%rip),%xmm0 @@ -2006,7 +2006,7 @@ rsaz_512_gather4: jnz .Loop_gather .byte 0xf3,0xc3 .LSEH_end_rsaz_512_gather4: -.cfi_endproc +.cfi_endproc .size rsaz_512_gather4,.-rsaz_512_gather4 .align 64 diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/x86_64-mont5.s index 40a60a3c8fc6b9..ab93b02d8c1aae 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/x86_64-mont5.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/bn/x86_64-mont5.s @@ -550,7 +550,7 @@ bn_mul4x_mont_gather5: .type mul4x_internal,@function .align 32 mul4x_internal: -.cfi_startproc +.cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq .Linc(%rip),%rax @@ -1072,7 +1072,7 @@ mul4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry -.cfi_endproc +.cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5 .type bn_power5,@function @@ -1215,7 +1215,7 @@ bn_power5: .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: -.cfi_startproc +.cfi_startproc @@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction: cmpq %rdx,%rdi jb .L8x_reduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_sqr8x_internal,.-bn_sqr8x_internal .type __bn_post4x_internal,@function .align 32 __bn_post4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx @@ -2046,18 +2046,18 @@ __bn_post4x_internal: movq %r9,%r10 negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_from_montgomery .type bn_from_montgomery,@function .align 32 bn_from_montgomery: -.cfi_startproc +.cfi_startproc testl $7,%r9d jz bn_from_mont8x xorl %eax,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_from_montgomery,.-bn_from_montgomery .type bn_from_mont8x,@function @@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5: .type mulx4x_internal,@function .align 32 
mulx4x_internal: -.cfi_startproc +.cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 @@ -2760,7 +2760,7 @@ mulx4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry -.cfi_endproc +.cfi_endproc .size mulx4x_internal,.-mulx4x_internal .type bn_powerx5,@function .align 32 @@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction: .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 __bn_postx4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 @@ -3567,13 +3567,13 @@ __bn_postx4x_internal: negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_get_bits5 .type bn_get_bits5,@function .align 16 bn_get_bits5: -.cfi_startproc +.cfi_startproc leaq 0(%rdi),%r10 leaq 1(%rdi),%r11 movl %esi,%ecx @@ -3587,14 +3587,14 @@ bn_get_bits5: shrl %cl,%eax andl $31,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_get_bits5,.-bn_get_bits5 .globl bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: -.cfi_startproc +.cfi_startproc cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx @@ -3607,7 +3607,7 @@ bn_scatter5: jnz .Lscatter .Lscatter_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 @@ -3615,7 +3615,7 @@ bn_scatter5: .align 32 bn_gather5: .LSEH_begin_bn_gather5: -.cfi_startproc +.cfi_startproc .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 @@ -3773,7 +3773,7 @@ bn_gather5: leaq (%r10),%rsp .byte 0xf3,0xc3 .LSEH_end_bn_gather5: -.cfi_endproc +.cfi_endproc .size bn_gather5,.-bn_gather5 .align 64 .Linc: diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-x86_64/asm/crypto/buildinf.h index 08c7674277be2e..89e382884cd1fd 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-x86_64" -#define DATE "built on: Mon Mar 23 17:54:15 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:50 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/camellia/cmll-x86_64.s index eeb20dd2291da7..92056f8b1e0e0e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/camellia/cmll-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/camellia/cmll-x86_64.s @@ -5,13 +5,13 @@ .type Camellia_EncryptBlock,@function .align 16 Camellia_EncryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Lenc_rounds -.cfi_endproc +.cfi_endproc .size Camellia_EncryptBlock,.-Camellia_EncryptBlock .globl Camellia_EncryptBlock_Rounds @@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds: .type _x86_64_Camellia_encrypt,@function .align 16 _x86_64_Camellia_encrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt: movl %edx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt @@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt: .type Camellia_DecryptBlock,@function .align 16 Camellia_DecryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Ldec_rounds -.cfi_endproc +.cfi_endproc .size Camellia_DecryptBlock,.-Camellia_DecryptBlock .globl Camellia_DecryptBlock_Rounds @@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds: .type _x86_64_Camellia_decrypt,@function .align 16 _x86_64_Camellia_decrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt: movl %ebx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt .globl Camellia_Ekeygen .type Camellia_Ekeygen,@function diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s index 5c9e4050416212..80569cae04667e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/ecp_nistz256-x86_64.s @@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx: .type ecp_nistz256_to_mont,@function .align 32 ecp_nistz256_to_mont: -.cfi_startproc +.cfi_startproc movl $0x80100,%ecx andl OPENSSL_ia32cap_P+8(%rip),%ecx leaq .LRR(%rip),%rdx jmp .Lmul_mont -.cfi_endproc +.cfi_endproc .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont @@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont: .type ecp_nistz256_scatter_w5,@function .align 32 ecp_nistz256_scatter_w5: -.cfi_startproc +.cfi_startproc leal -3(%rdx,%rdx,2),%edx movdqa 0(%rsi),%xmm0 shll $5,%edx @@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5: movdqa %xmm5,80(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5 @@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5: .type ecp_nistz256_scatter_w7,@function .align 32 ecp_nistz256_scatter_w7: -.cfi_startproc +.cfi_startproc movdqu 0(%rsi),%xmm0 shll $6,%edx movdqu 16(%rsi),%xmm1 @@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7: movdqa %xmm3,48(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7 diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/x25519-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/x25519-x86_64.s index 1788e568cda5d2..8fd319c83c880d 100644 --- 
a/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/x25519-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/ec/x25519-x86_64.s @@ -400,14 +400,14 @@ x25519_fe51_mul121666: .type x25519_fe64_eligible,@function .align 32 x25519_fe64_eligible: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+8(%rip),%ecx xorl %eax,%eax andl $0x80100,%ecx cmpl $0x80100,%ecx cmovel %ecx,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_eligible,.-x25519_fe64_eligible .globl x25519_fe64_mul @@ -650,7 +650,7 @@ x25519_fe64_sqr: .align 32 x25519_fe64_mul121666: .Lfe64_mul121666_body: -.cfi_startproc +.cfi_startproc movl $121666,%edx mulxq 0(%rsi),%r8,%rcx mulxq 8(%rsi),%r9,%rax @@ -679,7 +679,7 @@ x25519_fe64_mul121666: .Lfe64_mul121666_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_mul121666,.-x25519_fe64_mul121666 .globl x25519_fe64_add @@ -687,7 +687,7 @@ x25519_fe64_mul121666: .align 32 x25519_fe64_add: .Lfe64_add_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -716,7 +716,7 @@ x25519_fe64_add: .Lfe64_add_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_add,.-x25519_fe64_add .globl x25519_fe64_sub @@ -724,7 +724,7 @@ x25519_fe64_add: .align 32 x25519_fe64_sub: .Lfe64_sub_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -753,7 +753,7 @@ x25519_fe64_sub: .Lfe64_sub_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_sub,.-x25519_fe64_sub .globl x25519_fe64_tobytes @@ -761,7 +761,7 @@ x25519_fe64_sub: .align 32 x25519_fe64_tobytes: .Lfe64_to_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -797,6 +797,6 @@ x25519_fe64_tobytes: .Lfe64_to_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_tobytes,.-x25519_fe64_tobytes .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s index 01d89630a42f73..bf508aff6ff6ec 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/modes/aesni-gcm-x86_64.s @@ -3,7 +3,7 @@ .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: -.cfi_startproc +.cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 @@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x: vpxor %xmm4,%xmm8,%xmm8 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .type aesni_gcm_decrypt,@function @@ -418,7 +418,7 @@ aesni_gcm_decrypt: .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: -.cfi_startproc +.cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%rbp),%r13 @@ -505,7 +505,7 @@ _aesni_ctr32_6x: vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/poly1305/poly1305-x86_64.s index 987a65aab38147..9bb9be4632f53c 100644 --- 
a/deps/openssl/config/archs/linux-x86_64/asm/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/poly1305/poly1305-x86_64.s @@ -12,7 +12,7 @@ .type poly1305_init,@function .align 32 poly1305_init: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -48,7 +48,7 @@ poly1305_init: movl $1,%eax .Lno_key: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init,.-poly1305_init .type poly1305_blocks,@function @@ -169,7 +169,7 @@ poly1305_blocks: .type poly1305_emit,@function .align 32 poly1305_emit: -.cfi_startproc +.cfi_startproc .Lemit: movq 0(%rdi),%r8 movq 8(%rdi),%r9 @@ -190,12 +190,12 @@ poly1305_emit: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit,.-poly1305_emit .type __poly1305_block,@function .align 32 __poly1305_block: -.cfi_startproc +.cfi_startproc mulq %r14 movq %rax,%r9 movq %r11,%rax @@ -235,13 +235,13 @@ __poly1305_block: adcq $0,%rbx adcq $0,%rbp .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_block,.-__poly1305_block .type __poly1305_init_avx,@function .align 32 __poly1305_init_avx: -.cfi_startproc +.cfi_startproc movq %r11,%r14 movq %r12,%rbx xorq %rbp,%rbp @@ -399,7 +399,7 @@ __poly1305_init_avx: leaq -48-64(%rdi),%rdi .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_init_avx,.-__poly1305_init_avx .type poly1305_blocks_avx,@function @@ -1240,7 +1240,7 @@ poly1305_blocks_avx: .type poly1305_emit_avx,@function .align 32 poly1305_emit_avx: -.cfi_startproc +.cfi_startproc cmpl $0,20(%rdi) je .Lemit @@ -1291,7 +1291,7 @@ poly1305_emit_avx: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_avx,.-poly1305_emit_avx .type poly1305_blocks_avx2,@function .align 32 @@ -2488,7 +2488,7 @@ poly1305_blocks_avx512: .type poly1305_init_base2_44,@function .align 32 poly1305_init_base2_44: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -2522,12 +2522,12 @@ poly1305_init_base2_44: movq %r11,8(%rdx) movl $1,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init_base2_44,.-poly1305_init_base2_44 .type poly1305_blocks_vpmadd52,@function .align 32 poly1305_blocks_vpmadd52: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52 @@ -2634,12 +2634,12 @@ poly1305_blocks_vpmadd52: .Lno_data_vpmadd52: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 .type poly1305_blocks_vpmadd52_4x,@function .align 32 poly1305_blocks_vpmadd52_4x: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52_4x @@ -3064,12 +3064,12 @@ poly1305_blocks_vpmadd52_4x: .Lno_data_vpmadd52_4x: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x .type poly1305_blocks_vpmadd52_8x,@function .align 32 poly1305_blocks_vpmadd52_8x: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52_8x @@ -3410,12 +3410,12 @@ poly1305_blocks_vpmadd52_8x: .Lno_data_vpmadd52_8x: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x .type poly1305_emit_base2_44,@function .align 32 poly1305_emit_base2_44: -.cfi_startproc +.cfi_startproc movq 0(%rdi),%r8 movq 8(%rdi),%r9 movq 16(%rdi),%r10 @@ -3446,7 +3446,7 @@ poly1305_emit_base2_44: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_base2_44,.-poly1305_emit_base2_44 .align 64 .Lconst: @@ -3485,7 +3485,7 @@ poly1305_emit_base2_44: .type 
xor128_encrypt_n_pad,@function .align 16 xor128_encrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -3527,14 +3527,14 @@ xor128_encrypt_n_pad: .Ldone_enc: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad .globl xor128_decrypt_n_pad .type xor128_decrypt_n_pad,@function .align 16 xor128_decrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -3580,5 +3580,5 @@ xor128_decrypt_n_pad: .Ldone_dec: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/rc4/rc4-x86_64.s index b97c757550aad0..d1d1eece70bf1e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/rc4/rc4-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/rc4/rc4-x86_64.s @@ -5,7 +5,7 @@ .type RC4,@function .align 16 RC4: -.cfi_startproc +.cfi_startproc orq %rsi,%rsi jne .Lentry .byte 0xf3,0xc3 @@ -534,7 +534,7 @@ RC4: .type RC4_set_key,@function .align 16 RC4_set_key: -.cfi_startproc +.cfi_startproc leaq 8(%rdi),%rdi leaq (%rdx,%rsi,1),%rdx negq %rsi @@ -601,14 +601,14 @@ RC4_set_key: movl %eax,-8(%rdi) movl %eax,-4(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size RC4_set_key,.-RC4_set_key .globl RC4_options .type RC4_options,@function .align 16 RC4_options: -.cfi_startproc +.cfi_startproc leaq .Lopts(%rip),%rax movl OPENSSL_ia32cap_P(%rip),%edx btl $20,%edx @@ -621,7 +621,7 @@ RC4_options: addq $12,%rax .Ldone: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .align 64 .Lopts: .byte 114,99,52,40,56,120,44,105,110,116,41,0 diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/keccak1600-x86_64.s index 09617d014bdb7b..11f26e933d80b5 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/keccak1600-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/keccak1600-x86_64.s @@ -3,7 +3,7 @@ .type __KeccakF1600,@function .align 32 __KeccakF1600: -.cfi_startproc +.cfi_startproc movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx @@ -256,7 +256,7 @@ __KeccakF1600: leaq -192(%r15),%r15 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .type KeccakF1600,@function diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha1-x86_64.s index 98541727e555da..d4efc7206f57b3 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha1-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha1-x86_64.s @@ -1422,7 +1422,7 @@ _shaext_shortcut: movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext .type sha1_block_data_order_ssse3,@function .align 16 diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha256-x86_64.s index 9357385da3c49b..a7b60900fdd061 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha256-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/sha/sha256-x86_64.s @@ -1775,7 +1775,7 @@ K256: .align 64 sha256_block_data_order_shaext: _shaext_shortcut: -.cfi_startproc +.cfi_startproc leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 
16(%rdi),%xmm2 @@ -1978,7 +1978,7 @@ _shaext_shortcut: movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext .type sha256_block_data_order_ssse3,@function .align 64 diff --git a/deps/openssl/config/archs/linux-x86_64/asm/crypto/x86_64cpuid.s b/deps/openssl/config/archs/linux-x86_64/asm/crypto/x86_64cpuid.s index 9268ce8c9a9d63..748e6d161fa24a 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/linux-x86_64/asm/crypto/x86_64cpuid.s @@ -12,7 +12,7 @@ .type OPENSSL_atomic_add,@function .align 16 OPENSSL_atomic_add: -.cfi_startproc +.cfi_startproc movl (%rdi),%eax .Lspin: leaq (%rsi,%rax,1),%r8 .byte 0xf0 @@ -21,19 +21,19 @@ OPENSSL_atomic_add: movl %r8d,%eax .byte 0x48,0x98 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_atomic_add,.-OPENSSL_atomic_add .globl OPENSSL_rdtsc .type OPENSSL_rdtsc,@function .align 16 OPENSSL_rdtsc: -.cfi_startproc +.cfi_startproc rdtsc shlq $32,%rdx orq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_rdtsc,.-OPENSSL_rdtsc .globl OPENSSL_ia32_cpuid @@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid: .type OPENSSL_cleanse,@function .align 16 OPENSSL_cleanse: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $15,%rsi jae .Lot @@ -239,14 +239,14 @@ OPENSSL_cleanse: cmpq $0,%rsi jne .Little .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_cleanse,.-OPENSSL_cleanse .globl CRYPTO_memcmp .type CRYPTO_memcmp,@function .align 16 CRYPTO_memcmp: -.cfi_startproc +.cfi_startproc xorq %rax,%rax xorq %r10,%r10 cmpq $0,%rdx @@ -275,13 +275,13 @@ CRYPTO_memcmp: shrq $63,%rax .Lno_data: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size CRYPTO_memcmp,.-CRYPTO_memcmp .globl OPENSSL_wipe_cpu .type OPENSSL_wipe_cpu,@function .align 16 OPENSSL_wipe_cpu: -.cfi_startproc +.cfi_startproc pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 @@ -308,13 +308,13 @@ OPENSSL_wipe_cpu: xorq %r11,%r11 leaq 8(%rsp),%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu .globl OPENSSL_instrument_bus .type OPENSSL_instrument_bus,@function .align 16 OPENSSL_instrument_bus: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rsi,%r11 @@ -341,14 +341,14 @@ OPENSSL_instrument_bus: movq %r11,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus .globl OPENSSL_instrument_bus2 .type OPENSSL_instrument_bus2,@function .align 16 OPENSSL_instrument_bus2: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rdx,%r11 @@ -391,13 +391,13 @@ OPENSSL_instrument_bus2: movq 8(%rsp),%rax subq %rcx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2 .globl OPENSSL_ia32_rdrand_bytes .type OPENSSL_ia32_rdrand_bytes,@function .align 16 OPENSSL_ia32_rdrand_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdrand_bytes @@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes: .Ldone_rdrand_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes .globl OPENSSL_ia32_rdseed_bytes .type OPENSSL_ia32_rdseed_bytes,@function .align 16 OPENSSL_ia32_rdseed_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdseed_bytes @@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes: .Ldone_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size 
OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux-x86_64/asm_avx2/configdata.pm index 090ba6dbbc92e4..7920e99bdfd61d 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/configdata.pm @@ -112,8 +112,8 @@ our %config = ( sourcedir => ".", target => "linux-x86_64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s index 978bd2b6239c15..a38e21f0484e24 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha1-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha1_enc,@function .align 32 aesni_cbc_sha1_enc: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+0(%rip),%r10d movq OPENSSL_ia32cap_P+4(%rip),%r11 @@ -18,7 +18,7 @@ aesni_cbc_sha1_enc: je aesni_cbc_sha1_enc_avx jmp aesni_cbc_sha1_enc_ssse3 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc .type aesni_cbc_sha1_enc_ssse3,@function .align 32 @@ -2732,7 +2732,7 @@ K_XX_XX: .type aesni_cbc_sha1_enc_shaext,@function .align 32 aesni_cbc_sha1_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 movdqu (%r9),%xmm8 movd 16(%r9),%xmm9 @@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext: movdqu %xmm8,(%r9) movd %xmm9,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s index dd09f1b290af62..3e56a82578a354 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-sha256-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha256_enc,@function .align 16 aesni_cbc_sha256_enc: -.cfi_startproc +.cfi_startproc leaq OPENSSL_ia32cap_P(%rip),%r11 movl $1,%eax cmpq $0,%rdi @@ -31,7 +31,7 @@ aesni_cbc_sha256_enc: ud2 .Lprobe: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc .align 64 @@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2: .type aesni_cbc_sha256_enc_shaext,@function .align 32 aesni_cbc_sha256_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 leaq K256+128(%rip),%rax movdqu (%r9),%xmm1 @@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext: movdqu %xmm1,(%r9) movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s index c1e791eff59235..1a4b22e7b8a5d9 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/aes/aesni-x86_64.s @@ -861,7 +861,7 @@ aesni_ecb_encrypt: .type aesni_ccm64_encrypt_blocks,@function .align 16 aesni_ccm64_encrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movdqu (%r8),%xmm6 movdqa .Lincrement64(%rip),%xmm9 @@ 
-920,13 +920,13 @@ aesni_ccm64_encrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks .globl aesni_ccm64_decrypt_blocks .type aesni_ccm64_decrypt_blocks,@function .align 16 aesni_ccm64_decrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movups (%r8),%xmm6 movdqu (%r9),%xmm3 @@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks .globl aesni_ctr32_encrypt_blocks .type aesni_ctr32_encrypt_blocks,@function @@ -2794,7 +2794,7 @@ aesni_ocb_encrypt: .type __ocb_encrypt6,@function .align 32 __ocb_encrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2892,13 +2892,13 @@ __ocb_encrypt6: .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt6,.-__ocb_encrypt6 .type __ocb_encrypt4,@function .align 32 __ocb_encrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2963,13 +2963,13 @@ __ocb_encrypt4: .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt4,.-__ocb_encrypt4 .type __ocb_encrypt1,@function .align 32 __ocb_encrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm2,%xmm8 @@ -3000,7 +3000,7 @@ __ocb_encrypt1: .byte 102,15,56,221,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt1,.-__ocb_encrypt1 .globl aesni_ocb_decrypt @@ -3243,7 +3243,7 @@ aesni_ocb_decrypt: .type __ocb_decrypt6,@function .align 32 __ocb_decrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3335,13 +3335,13 @@ __ocb_decrypt6: .byte 102,65,15,56,223,246 .byte 102,65,15,56,223,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt6,.-__ocb_decrypt6 .type __ocb_decrypt4,@function .align 32 __ocb_decrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3402,13 +3402,13 @@ __ocb_decrypt4: .byte 102,65,15,56,223,228 .byte 102,65,15,56,223,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt4,.-__ocb_decrypt4 .type __ocb_decrypt1,@function .align 32 __ocb_decrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm7,%xmm2 @@ -3438,7 +3438,7 @@ __ocb_decrypt1: .byte 102,15,56,223,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt1,.-__ocb_decrypt1 .globl aesni_cbc_encrypt .type aesni_cbc_encrypt,@function @@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_set_encrypt_key,.-aesni_set_encrypt_key .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key .align 64 diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s index 7876e0b8f93d9c..d5025b23cd668e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/rsaz-x86_64.s @@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one: .type __rsaz_512_reduce,@function .align 32 __rsaz_512_reduce: -.cfi_startproc +.cfi_startproc movq %r8,%rbx imulq 128+8(%rsp),%rbx movq 0(%rbp),%rax @@ -1533,12 +1533,12 @@ 
__rsaz_512_reduce: jne .Lreduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reduce,.-__rsaz_512_reduce .type __rsaz_512_reducex,@function .align 32 __rsaz_512_reducex: -.cfi_startproc +.cfi_startproc imulq %r8,%rdx xorq %rsi,%rsi @@ -1591,12 +1591,12 @@ __rsaz_512_reducex: jne .Lreduction_loopx .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reducex,.-__rsaz_512_reducex .type __rsaz_512_subtract,@function .align 32 __rsaz_512_subtract: -.cfi_startproc +.cfi_startproc movq %r8,(%rdi) movq %r9,8(%rdi) movq %r10,16(%rdi) @@ -1650,12 +1650,12 @@ __rsaz_512_subtract: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_subtract,.-__rsaz_512_subtract .type __rsaz_512_mul,@function .align 32 __rsaz_512_mul: -.cfi_startproc +.cfi_startproc leaq 8(%rsp),%rdi movq (%rsi),%rax @@ -1794,12 +1794,12 @@ __rsaz_512_mul: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mul,.-__rsaz_512_mul .type __rsaz_512_mulx,@function .align 32 __rsaz_512_mulx: -.cfi_startproc +.cfi_startproc mulxq (%rsi),%rbx,%r8 movq $-6,%rcx @@ -1916,13 +1916,13 @@ __rsaz_512_mulx: movq %r15,8+64+56(%rsp) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mulx,.-__rsaz_512_mulx .globl rsaz_512_scatter4 .type rsaz_512_scatter4,@function .align 16 rsaz_512_scatter4: -.cfi_startproc +.cfi_startproc leaq (%rdi,%rdx,8),%rdi movl $8,%r9d jmp .Loop_scatter @@ -1935,14 +1935,14 @@ rsaz_512_scatter4: decl %r9d jnz .Loop_scatter .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size rsaz_512_scatter4,.-rsaz_512_scatter4 .globl rsaz_512_gather4 .type rsaz_512_gather4,@function .align 16 rsaz_512_gather4: -.cfi_startproc +.cfi_startproc movd %edx,%xmm8 movdqa .Linc+16(%rip),%xmm1 movdqa .Linc(%rip),%xmm0 @@ -2006,7 +2006,7 @@ rsaz_512_gather4: jnz .Loop_gather .byte 0xf3,0xc3 .LSEH_end_rsaz_512_gather4: -.cfi_endproc +.cfi_endproc .size rsaz_512_gather4,.-rsaz_512_gather4 .align 64 diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s index 40a60a3c8fc6b9..ab93b02d8c1aae 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/bn/x86_64-mont5.s @@ -550,7 +550,7 @@ bn_mul4x_mont_gather5: .type mul4x_internal,@function .align 32 mul4x_internal: -.cfi_startproc +.cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq .Linc(%rip),%rax @@ -1072,7 +1072,7 @@ mul4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry -.cfi_endproc +.cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5 .type bn_power5,@function @@ -1215,7 +1215,7 @@ bn_power5: .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: -.cfi_startproc +.cfi_startproc @@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction: cmpq %rdx,%rdi jb .L8x_reduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_sqr8x_internal,.-bn_sqr8x_internal .type __bn_post4x_internal,@function .align 32 __bn_post4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx @@ -2046,18 +2046,18 @@ __bn_post4x_internal: movq %r9,%r10 negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_from_montgomery .type bn_from_montgomery,@function .align 32 bn_from_montgomery: -.cfi_startproc +.cfi_startproc testl $7,%r9d jz bn_from_mont8x xorl %eax,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size 
bn_from_montgomery,.-bn_from_montgomery .type bn_from_mont8x,@function @@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5: .type mulx4x_internal,@function .align 32 mulx4x_internal: -.cfi_startproc +.cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 @@ -2760,7 +2760,7 @@ mulx4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry -.cfi_endproc +.cfi_endproc .size mulx4x_internal,.-mulx4x_internal .type bn_powerx5,@function .align 32 @@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction: .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 __bn_postx4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 @@ -3567,13 +3567,13 @@ __bn_postx4x_internal: negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_get_bits5 .type bn_get_bits5,@function .align 16 bn_get_bits5: -.cfi_startproc +.cfi_startproc leaq 0(%rdi),%r10 leaq 1(%rdi),%r11 movl %esi,%ecx @@ -3587,14 +3587,14 @@ bn_get_bits5: shrl %cl,%eax andl $31,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_get_bits5,.-bn_get_bits5 .globl bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: -.cfi_startproc +.cfi_startproc cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx @@ -3607,7 +3607,7 @@ bn_scatter5: jnz .Lscatter .Lscatter_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 @@ -3615,7 +3615,7 @@ bn_scatter5: .align 32 bn_gather5: .LSEH_begin_bn_gather5: -.cfi_startproc +.cfi_startproc .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 @@ -3773,7 +3773,7 @@ bn_gather5: leaq (%r10),%rsp .byte 0xf3,0xc3 .LSEH_end_bn_gather5: -.cfi_endproc +.cfi_endproc .size bn_gather5,.-bn_gather5 .align 64 .Linc: diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/buildinf.h index 498e7a6ba55a2e..768c45fd036e53 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-x86_64" -#define DATE "built on: Mon Mar 23 17:54:45 2020 UTC" +#define DATE "built on: Wed Apr 1 16:07:59 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s index eeb20dd2291da7..92056f8b1e0e0e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/camellia/cmll-x86_64.s @@ -5,13 +5,13 @@ .type Camellia_EncryptBlock,@function .align 16 Camellia_EncryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Lenc_rounds -.cfi_endproc +.cfi_endproc .size Camellia_EncryptBlock,.-Camellia_EncryptBlock .globl Camellia_EncryptBlock_Rounds @@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds: .type _x86_64_Camellia_encrypt,@function .align 16 _x86_64_Camellia_encrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt: movl %edx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt @@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt: .type Camellia_DecryptBlock,@function .align 16 Camellia_DecryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Ldec_rounds -.cfi_endproc +.cfi_endproc .size Camellia_DecryptBlock,.-Camellia_DecryptBlock .globl Camellia_DecryptBlock_Rounds @@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds: .type _x86_64_Camellia_decrypt,@function .align 16 _x86_64_Camellia_decrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt: movl %ebx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt .globl Camellia_Ekeygen .type Camellia_Ekeygen,@function diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s index 5c9e4050416212..80569cae04667e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s @@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx: .type ecp_nistz256_to_mont,@function .align 32 ecp_nistz256_to_mont: -.cfi_startproc +.cfi_startproc movl $0x80100,%ecx andl OPENSSL_ia32cap_P+8(%rip),%ecx leaq .LRR(%rip),%rdx jmp .Lmul_mont -.cfi_endproc +.cfi_endproc .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont @@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont: .type ecp_nistz256_scatter_w5,@function .align 32 ecp_nistz256_scatter_w5: -.cfi_startproc +.cfi_startproc leal -3(%rdx,%rdx,2),%edx movdqa 0(%rsi),%xmm0 shll $5,%edx @@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5: movdqa %xmm5,80(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5 @@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5: .type ecp_nistz256_scatter_w7,@function .align 32 ecp_nistz256_scatter_w7: -.cfi_startproc +.cfi_startproc movdqu 0(%rsi),%xmm0 shll $6,%edx movdqu 16(%rsi),%xmm1 @@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7: movdqa %xmm3,48(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7 diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s index 1788e568cda5d2..8fd319c83c880d 100644 --- 
a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/ec/x25519-x86_64.s @@ -400,14 +400,14 @@ x25519_fe51_mul121666: .type x25519_fe64_eligible,@function .align 32 x25519_fe64_eligible: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+8(%rip),%ecx xorl %eax,%eax andl $0x80100,%ecx cmpl $0x80100,%ecx cmovel %ecx,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_eligible,.-x25519_fe64_eligible .globl x25519_fe64_mul @@ -650,7 +650,7 @@ x25519_fe64_sqr: .align 32 x25519_fe64_mul121666: .Lfe64_mul121666_body: -.cfi_startproc +.cfi_startproc movl $121666,%edx mulxq 0(%rsi),%r8,%rcx mulxq 8(%rsi),%r9,%rax @@ -679,7 +679,7 @@ x25519_fe64_mul121666: .Lfe64_mul121666_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_mul121666,.-x25519_fe64_mul121666 .globl x25519_fe64_add @@ -687,7 +687,7 @@ x25519_fe64_mul121666: .align 32 x25519_fe64_add: .Lfe64_add_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -716,7 +716,7 @@ x25519_fe64_add: .Lfe64_add_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_add,.-x25519_fe64_add .globl x25519_fe64_sub @@ -724,7 +724,7 @@ x25519_fe64_add: .align 32 x25519_fe64_sub: .Lfe64_sub_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -753,7 +753,7 @@ x25519_fe64_sub: .Lfe64_sub_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_sub,.-x25519_fe64_sub .globl x25519_fe64_tobytes @@ -761,7 +761,7 @@ x25519_fe64_sub: .align 32 x25519_fe64_tobytes: .Lfe64_to_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -797,6 +797,6 @@ x25519_fe64_tobytes: .Lfe64_to_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_tobytes,.-x25519_fe64_tobytes .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s index 01d89630a42f73..bf508aff6ff6ec 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/modes/aesni-gcm-x86_64.s @@ -3,7 +3,7 @@ .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: -.cfi_startproc +.cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 @@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x: vpxor %xmm4,%xmm8,%xmm8 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .type aesni_gcm_decrypt,@function @@ -418,7 +418,7 @@ aesni_gcm_decrypt: .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: -.cfi_startproc +.cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%rbp),%r13 @@ -505,7 +505,7 @@ _aesni_ctr32_6x: vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s index 2636c52bbe5e48..8f2554e047f1bf 100644 --- 
a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/poly1305/poly1305-x86_64.s @@ -12,7 +12,7 @@ .type poly1305_init,@function .align 32 poly1305_init: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -43,7 +43,7 @@ poly1305_init: movl $1,%eax .Lno_key: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init,.-poly1305_init .type poly1305_blocks,@function @@ -164,7 +164,7 @@ poly1305_blocks: .type poly1305_emit,@function .align 32 poly1305_emit: -.cfi_startproc +.cfi_startproc .Lemit: movq 0(%rdi),%r8 movq 8(%rdi),%r9 @@ -185,12 +185,12 @@ poly1305_emit: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit,.-poly1305_emit .type __poly1305_block,@function .align 32 __poly1305_block: -.cfi_startproc +.cfi_startproc mulq %r14 movq %rax,%r9 movq %r11,%rax @@ -230,13 +230,13 @@ __poly1305_block: adcq $0,%rbx adcq $0,%rbp .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_block,.-__poly1305_block .type __poly1305_init_avx,@function .align 32 __poly1305_init_avx: -.cfi_startproc +.cfi_startproc movq %r11,%r14 movq %r12,%rbx xorq %rbp,%rbp @@ -394,7 +394,7 @@ __poly1305_init_avx: leaq -48-64(%rdi),%rdi .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_init_avx,.-__poly1305_init_avx .type poly1305_blocks_avx,@function @@ -1235,7 +1235,7 @@ poly1305_blocks_avx: .type poly1305_emit_avx,@function .align 32 poly1305_emit_avx: -.cfi_startproc +.cfi_startproc cmpl $0,20(%rdi) je .Lemit @@ -1286,7 +1286,7 @@ poly1305_emit_avx: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_avx,.-poly1305_emit_avx .type poly1305_blocks_avx2,@function .align 32 @@ -1969,7 +1969,7 @@ poly1305_blocks_avx2: .type xor128_encrypt_n_pad,@function .align 16 xor128_encrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -2011,14 +2011,14 @@ xor128_encrypt_n_pad: .Ldone_enc: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad .globl xor128_decrypt_n_pad .type xor128_decrypt_n_pad,@function .align 16 xor128_decrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -2064,5 +2064,5 @@ xor128_decrypt_n_pad: .Ldone_dec: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s index b97c757550aad0..d1d1eece70bf1e 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/rc4/rc4-x86_64.s @@ -5,7 +5,7 @@ .type RC4,@function .align 16 RC4: -.cfi_startproc +.cfi_startproc orq %rsi,%rsi jne .Lentry .byte 0xf3,0xc3 @@ -534,7 +534,7 @@ RC4: .type RC4_set_key,@function .align 16 RC4_set_key: -.cfi_startproc +.cfi_startproc leaq 8(%rdi),%rdi leaq (%rdx,%rsi,1),%rdx negq %rsi @@ -601,14 +601,14 @@ RC4_set_key: movl %eax,-8(%rdi) movl %eax,-4(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size RC4_set_key,.-RC4_set_key .globl RC4_options .type RC4_options,@function .align 16 RC4_options: -.cfi_startproc +.cfi_startproc leaq .Lopts(%rip),%rax movl OPENSSL_ia32cap_P(%rip),%edx btl $20,%edx @@ -621,7 +621,7 @@ RC4_options: addq $12,%rax .Ldone: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .align 64 .Lopts: 
.byte 114,99,52,40,56,120,44,105,110,116,41,0 diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s index 09617d014bdb7b..11f26e933d80b5 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/keccak1600-x86_64.s @@ -3,7 +3,7 @@ .type __KeccakF1600,@function .align 32 __KeccakF1600: -.cfi_startproc +.cfi_startproc movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx @@ -256,7 +256,7 @@ __KeccakF1600: leaq -192(%r15),%r15 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .type KeccakF1600,@function diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s index 98541727e555da..d4efc7206f57b3 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha1-x86_64.s @@ -1422,7 +1422,7 @@ _shaext_shortcut: movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext .type sha1_block_data_order_ssse3,@function .align 16 diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s index 9357385da3c49b..a7b60900fdd061 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/sha/sha256-x86_64.s @@ -1775,7 +1775,7 @@ K256: .align 64 sha256_block_data_order_shaext: _shaext_shortcut: -.cfi_startproc +.cfi_startproc leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 @@ -1978,7 +1978,7 @@ _shaext_shortcut: movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext .type sha256_block_data_order_ssse3,@function .align 64 diff --git a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/x86_64cpuid.s b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/x86_64cpuid.s index 9268ce8c9a9d63..748e6d161fa24a 100644 --- a/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/linux-x86_64/asm_avx2/crypto/x86_64cpuid.s @@ -12,7 +12,7 @@ .type OPENSSL_atomic_add,@function .align 16 OPENSSL_atomic_add: -.cfi_startproc +.cfi_startproc movl (%rdi),%eax .Lspin: leaq (%rsi,%rax,1),%r8 .byte 0xf0 @@ -21,19 +21,19 @@ OPENSSL_atomic_add: movl %r8d,%eax .byte 0x48,0x98 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_atomic_add,.-OPENSSL_atomic_add .globl OPENSSL_rdtsc .type OPENSSL_rdtsc,@function .align 16 OPENSSL_rdtsc: -.cfi_startproc +.cfi_startproc rdtsc shlq $32,%rdx orq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_rdtsc,.-OPENSSL_rdtsc .globl OPENSSL_ia32_cpuid @@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid: .type OPENSSL_cleanse,@function .align 16 OPENSSL_cleanse: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $15,%rsi jae .Lot @@ -239,14 +239,14 @@ OPENSSL_cleanse: cmpq $0,%rsi jne .Little .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_cleanse,.-OPENSSL_cleanse .globl CRYPTO_memcmp .type CRYPTO_memcmp,@function .align 16 CRYPTO_memcmp: -.cfi_startproc +.cfi_startproc xorq %rax,%rax xorq 
%r10,%r10 cmpq $0,%rdx @@ -275,13 +275,13 @@ CRYPTO_memcmp: shrq $63,%rax .Lno_data: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size CRYPTO_memcmp,.-CRYPTO_memcmp .globl OPENSSL_wipe_cpu .type OPENSSL_wipe_cpu,@function .align 16 OPENSSL_wipe_cpu: -.cfi_startproc +.cfi_startproc pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 @@ -308,13 +308,13 @@ OPENSSL_wipe_cpu: xorq %r11,%r11 leaq 8(%rsp),%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu .globl OPENSSL_instrument_bus .type OPENSSL_instrument_bus,@function .align 16 OPENSSL_instrument_bus: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rsi,%r11 @@ -341,14 +341,14 @@ OPENSSL_instrument_bus: movq %r11,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus .globl OPENSSL_instrument_bus2 .type OPENSSL_instrument_bus2,@function .align 16 OPENSSL_instrument_bus2: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rdx,%r11 @@ -391,13 +391,13 @@ OPENSSL_instrument_bus2: movq 8(%rsp),%rax subq %rcx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2 .globl OPENSSL_ia32_rdrand_bytes .type OPENSSL_ia32_rdrand_bytes,@function .align 16 OPENSSL_ia32_rdrand_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdrand_bytes @@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes: .Ldone_rdrand_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes .globl OPENSSL_ia32_rdseed_bytes .type OPENSSL_ia32_rdseed_bytes,@function .align 16 OPENSSL_ia32_rdseed_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdseed_bytes @@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes: .Ldone_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes diff --git a/deps/openssl/config/archs/linux-x86_64/no-asm/configdata.pm b/deps/openssl/config/archs/linux-x86_64/no-asm/configdata.pm index f03026a729cd66..24cfc38f947044 100644 --- a/deps/openssl/config/archs/linux-x86_64/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux-x86_64/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux-x86_64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux-x86_64/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux-x86_64/no-asm/crypto/buildinf.h index 31bedb24523989..c9c02dc764003c 100644 --- a/deps/openssl/config/archs/linux-x86_64/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux-x86_64/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux-x86_64" -#define DATE "built on: Mon Mar 23 17:55:21 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:08 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux32-s390x/asm/configdata.pm b/deps/openssl/config/archs/linux32-s390x/asm/configdata.pm index 78c56a17d42a59..069a165950e8db 100644 --- a/deps/openssl/config/archs/linux32-s390x/asm/configdata.pm +++ b/deps/openssl/config/archs/linux32-s390x/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux32-s390x", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux32-s390x/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux32-s390x/asm/crypto/buildinf.h index 69bc7d12fbe7ed..1762b032a3cb4e 100644 --- a/deps/openssl/config/archs/linux32-s390x/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux32-s390x/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux32-s390x" -#define DATE "built on: Mon Mar 23 17:57:25 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:37 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/linux32-s390x/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux32-s390x/asm_avx2/configdata.pm index 220e70e87a7a3d..4cd3f3b147eb76 100644 --- a/deps/openssl/config/archs/linux32-s390x/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux32-s390x/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux32-s390x", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux32-s390x/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux32-s390x/asm_avx2/crypto/buildinf.h index 3e27747bd80652..c649d988bbbeb0 100644 --- a/deps/openssl/config/archs/linux32-s390x/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux32-s390x/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux32-s390x" -#define DATE "built on: Mon Mar 23 17:57:36 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:40 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/linux32-s390x/no-asm/configdata.pm b/deps/openssl/config/archs/linux32-s390x/no-asm/configdata.pm index 80a6256c0b0458..c73e3b3acb01ec 100644 --- a/deps/openssl/config/archs/linux32-s390x/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux32-s390x/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux32-s390x", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux32-s390x/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux32-s390x/no-asm/crypto/buildinf.h index f16c39b54aeb18..f988c2259eddf1 100644 --- a/deps/openssl/config/archs/linux32-s390x/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux32-s390x/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux32-s390x" -#define DATE "built on: Mon Mar 23 17:57:44 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:42 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux64-mips64/asm/configdata.pm b/deps/openssl/config/archs/linux64-mips64/asm/configdata.pm index 1302d730d02d59..130842f418b146 100644 --- a/deps/openssl/config/archs/linux64-mips64/asm/configdata.pm +++ b/deps/openssl/config/archs/linux64-mips64/asm/configdata.pm @@ -62,7 +62,7 @@ our %config = ( options => "enable-ssl-trace no-afalgeng no-asan no-buildtest-c++ no-comp no-crypto-mdebug no-crypto-mdebug-backtrace no-devcryptoeng no-dynamic-engine no-ec_nistp_64_gcc_128 no-egd no-external-tests no-fuzz-afl no-fuzz-libfuzzer no-heartbeats no-md2 no-msan no-rc5 no-sctp no-shared no-ssl3 no-ssl3-method no-ubsan no-unit-test no-weak-ssl-ciphers no-zlib no-zlib-dynamic", perl_archname => "x86_64-linux-gnu-thread-multi", perl_cmd => "/usr/bin/perl", - perl_version => "5.28.1", + perl_version => "5.26.1", perlargv => [ "no-comp", "no-shared", "no-afalgeng", "enable-ssl-trace", "linux64-mips64" ], perlenv => { "AR" => undef, @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux64-mips64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -15356,3 +15356,4 @@ Verbose output. =back =cut + diff --git a/deps/openssl/config/archs/linux64-mips64/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux64-mips64/asm/crypto/buildinf.h index 5f969bc3a4bfc0..e34c4f639e20a3 100644 --- a/deps/openssl/config/archs/linux64-mips64/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux64-mips64/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux64-mips64" -#define DATE "built on: Wed Mar 18 21:09:33 2020 UTC" +#define DATE "built on: Tue Mar 31 13:08:19 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux64-mips64/asm/crypto/sha/sha256-mips.S b/deps/openssl/config/archs/linux64-mips64/asm/crypto/sha/sha256-mips.S new file mode 100644 index 00000000000000..2906baaf6e155f --- /dev/null +++ b/deps/openssl/config/archs/linux64-mips64/asm/crypto/sha/sha256-mips.S @@ -0,0 +1,3037 @@ +#include "mips_arch.h" + +.text +.set noat +#if !defined(__mips_eabi) && (!defined(__vxworks) || defined(__pic__)) +.option pic2 +#endif + +.align 5 +.globl sha256_block_data_order +.ent sha256_block_data_order +sha256_block_data_order: + .frame $29,192,$31 + .mask 0xc0ff0000,-8 + .set noreorder + dsubu $29,192 + sd $31,192-1*8($29) + sd $30,192-2*8($29) + sd $23,192-3*8($29) + sd $22,192-4*8($29) + sd $21,192-5*8($29) + sd $20,192-6*8($29) + sd $19,192-7*8($29) + sd $18,192-8*8($29) + sd $17,192-9*8($29) + sd $16,192-10*8($29) + dsll $23,$6,6 + .cplocal $6 + .cpsetup $25,$0,sha256_block_data_order + .set reorder + dla $6,K256 # PIC-ified 'load address' + + lw $1,0*4($4) # load context + lw $2,1*4($4) + lw $3,2*4($4) + lw $7,3*4($4) + lw $24,4*4($4) + lw $25,5*4($4) + lw $30,6*4($4) + lw $31,7*4($4) + + daddu $23,$5 # pointer to the end of input + sd $23,16*4($29) + b .Loop + +.align 5 +.Loop: +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $8,($5) +#else + lwl $8,3($5) + lwr $8,0($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $9,4($5) +#else + lwl $9,7($5) + lwr $9,4($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $8,$8 # byte swap(0) + rotr $8,$8,16 +#else + srl $13,$8,24 # byte swap(0) + srl $14,$8,8 + andi $15,$8,0xFF00 + sll $8,$8,24 + andi $14,0xFF00 + sll $15,$15,8 + or $8,$13 + or $14,$15 + or $8,$14 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 0 + rotr $13,$24,6 + addu $12,$8,$31 + rotr $14,$24,11 + and $15,$24 + rotr $31,$24,25 + xor $13,$14 + rotr $14,$1,2 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $12,$15 + lw $15,0($6) # K[0] + xor $31,$14 + rotr $14,$1,22 + addu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#else + addu $12,$8,$31 # 0 + srl $31,$24,6 + xor $15,$25,$30 + sll $14,$24,7 + and $15,$24 + srl $13,$24,11 + xor $31,$14 + sll $14,$24,21 + xor $31,$13 + srl $13,$24,25 + xor $31,$14 + sll $14,$24,26 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + srl $31,$1,2 + addu $12,$15 + lw $15,0($6) # K[0] + sll $14,$1,10 + addu $12,$13 + srl $13,$1,13 + xor $31,$14 + sll $14,$1,19 + xor $31,$13 + srl $13,$1,22 + xor $31,$14 + sll $14,$1,30 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#endif + sw $8,0($29) # offload to ring buffer + addu $31,$13 + and $14,$1 + addu $12,$15 # +=K[0] + addu $31,$14 # +=Maj(a,b,c) + addu $7,$12 + addu $31,$12 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $10,8($5) +#else + lwl $10,11($5) + lwr $10,8($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $9,$9 # byte swap(1) + rotr $9,$9,16 +#else + srl $14,$9,24 # byte swap(1) + srl $15,$9,8 + andi $16,$9,0xFF00 + sll $9,$9,24 + andi $15,0xFF00 + sll $16,$16,8 + or $9,$14 + or $15,$16 + or $9,$15 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 1 + rotr $14,$7,6 + addu $13,$9,$30 + rotr $15,$7,11 + and $16,$7 + rotr $30,$7,25 + xor $14,$15 + rotr $15,$31,2 + xor $16,$25 # Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + rotr 
$30,$31,13 + addu $13,$16 + lw $16,4($6) # K[1] + xor $30,$15 + rotr $15,$31,22 + addu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + addu $13,$9,$30 # 1 + srl $30,$7,6 + xor $16,$24,$25 + sll $15,$7,7 + and $16,$7 + srl $14,$7,11 + xor $30,$15 + sll $15,$7,21 + xor $30,$14 + srl $14,$7,25 + xor $30,$15 + sll $15,$7,26 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + srl $30,$31,2 + addu $13,$16 + lw $16,4($6) # K[1] + sll $15,$31,10 + addu $13,$14 + srl $14,$31,13 + xor $30,$15 + sll $15,$31,19 + xor $30,$14 + srl $14,$31,22 + xor $30,$15 + sll $15,$31,30 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sw $9,4($29) # offload to ring buffer + addu $30,$14 + and $15,$31 + addu $13,$16 # +=K[1] + addu $30,$15 # +=Maj(a,b,c) + addu $3,$13 + addu $30,$13 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $11,12($5) +#else + lwl $11,15($5) + lwr $11,12($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $10,$10 # byte swap(2) + rotr $10,$10,16 +#else + srl $15,$10,24 # byte swap(2) + srl $16,$10,8 + andi $17,$10,0xFF00 + sll $10,$10,24 + andi $16,0xFF00 + sll $17,$17,8 + or $10,$15 + or $16,$17 + or $10,$16 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 2 + rotr $15,$3,6 + addu $14,$10,$25 + rotr $16,$3,11 + and $17,$3 + rotr $25,$3,25 + xor $15,$16 + rotr $16,$30,2 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $14,$17 + lw $17,8($6) # K[2] + xor $25,$16 + rotr $16,$30,22 + addu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + addu $14,$10,$25 # 2 + srl $25,$3,6 + xor $17,$7,$24 + sll $16,$3,7 + and $17,$3 + srl $15,$3,11 + xor $25,$16 + sll $16,$3,21 + xor $25,$15 + srl $15,$3,25 + xor $25,$16 + sll $16,$3,26 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + srl $25,$30,2 + addu $14,$17 + lw $17,8($6) # K[2] + sll $16,$30,10 + addu $14,$15 + srl $15,$30,13 + xor $25,$16 + sll $16,$30,19 + xor $25,$15 + srl $15,$30,22 + xor $25,$16 + sll $16,$30,30 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sw $10,8($29) # offload to ring buffer + addu $25,$15 + and $16,$30 + addu $14,$17 # +=K[2] + addu $25,$16 # +=Maj(a,b,c) + addu $2,$14 + addu $25,$14 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $12,16($5) +#else + lwl $12,19($5) + lwr $12,16($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $11,$11 # byte swap(3) + rotr $11,$11,16 +#else + srl $16,$11,24 # byte swap(3) + srl $17,$11,8 + andi $18,$11,0xFF00 + sll $11,$11,24 + andi $17,0xFF00 + sll $18,$18,8 + or $11,$16 + or $17,$18 + or $11,$17 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 3 + rotr $16,$2,6 + addu $15,$11,$24 + rotr $17,$2,11 + and $18,$2 + rotr $24,$2,25 + xor $16,$17 + rotr $17,$25,2 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $15,$18 + lw $18,12($6) # K[3] + xor $24,$17 + rotr $17,$25,22 + addu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + addu $15,$11,$24 # 3 + srl $24,$2,6 + xor $18,$3,$7 + sll $17,$2,7 + and $18,$2 + srl $16,$2,11 + xor $24,$17 + sll $17,$2,21 + xor $24,$16 + srl $16,$2,25 + xor $24,$17 + sll $17,$2,26 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + srl $24,$25,2 + addu $15,$18 + lw $18,12($6) # K[3] + sll 
$17,$25,10 + addu $15,$16 + srl $16,$25,13 + xor $24,$17 + sll $17,$25,19 + xor $24,$16 + srl $16,$25,22 + xor $24,$17 + sll $17,$25,30 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sw $11,12($29) # offload to ring buffer + addu $24,$16 + and $17,$25 + addu $15,$18 # +=K[3] + addu $24,$17 # +=Maj(a,b,c) + addu $1,$15 + addu $24,$15 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $13,20($5) +#else + lwl $13,23($5) + lwr $13,20($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $12,$12 # byte swap(4) + rotr $12,$12,16 +#else + srl $17,$12,24 # byte swap(4) + srl $18,$12,8 + andi $19,$12,0xFF00 + sll $12,$12,24 + andi $18,0xFF00 + sll $19,$19,8 + or $12,$17 + or $18,$19 + or $12,$18 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 4 + rotr $17,$1,6 + addu $16,$12,$7 + rotr $18,$1,11 + and $19,$1 + rotr $7,$1,25 + xor $17,$18 + rotr $18,$24,2 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $16,$19 + lw $19,16($6) # K[4] + xor $7,$18 + rotr $18,$24,22 + addu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + addu $16,$12,$7 # 4 + srl $7,$1,6 + xor $19,$2,$3 + sll $18,$1,7 + and $19,$1 + srl $17,$1,11 + xor $7,$18 + sll $18,$1,21 + xor $7,$17 + srl $17,$1,25 + xor $7,$18 + sll $18,$1,26 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + srl $7,$24,2 + addu $16,$19 + lw $19,16($6) # K[4] + sll $18,$24,10 + addu $16,$17 + srl $17,$24,13 + xor $7,$18 + sll $18,$24,19 + xor $7,$17 + srl $17,$24,22 + xor $7,$18 + sll $18,$24,30 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sw $12,16($29) # offload to ring buffer + addu $7,$17 + and $18,$24 + addu $16,$19 # +=K[4] + addu $7,$18 # +=Maj(a,b,c) + addu $31,$16 + addu $7,$16 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $14,24($5) +#else + lwl $14,27($5) + lwr $14,24($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $13,$13 # byte swap(5) + rotr $13,$13,16 +#else + srl $18,$13,24 # byte swap(5) + srl $19,$13,8 + andi $20,$13,0xFF00 + sll $13,$13,24 + andi $19,0xFF00 + sll $20,$20,8 + or $13,$18 + or $19,$20 + or $13,$19 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 5 + rotr $18,$31,6 + addu $17,$13,$3 + rotr $19,$31,11 + and $20,$31 + rotr $3,$31,25 + xor $18,$19 + rotr $19,$7,2 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $17,$20 + lw $20,20($6) # K[5] + xor $3,$19 + rotr $19,$7,22 + addu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + addu $17,$13,$3 # 5 + srl $3,$31,6 + xor $20,$1,$2 + sll $19,$31,7 + and $20,$31 + srl $18,$31,11 + xor $3,$19 + sll $19,$31,21 + xor $3,$18 + srl $18,$31,25 + xor $3,$19 + sll $19,$31,26 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + srl $3,$7,2 + addu $17,$20 + lw $20,20($6) # K[5] + sll $19,$7,10 + addu $17,$18 + srl $18,$7,13 + xor $3,$19 + sll $19,$7,19 + xor $3,$18 + srl $18,$7,22 + xor $3,$19 + sll $19,$7,30 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sw $13,20($29) # offload to ring buffer + addu $3,$18 + and $19,$7 + addu $17,$20 # +=K[5] + addu $3,$19 # +=Maj(a,b,c) + addu $30,$17 + addu $3,$17 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $15,28($5) +#else + lwl $15,31($5) + lwr $15,28($5) +#endif +#if 
defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $14,$14 # byte swap(6) + rotr $14,$14,16 +#else + srl $19,$14,24 # byte swap(6) + srl $20,$14,8 + andi $21,$14,0xFF00 + sll $14,$14,24 + andi $20,0xFF00 + sll $21,$21,8 + or $14,$19 + or $20,$21 + or $14,$20 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 6 + rotr $19,$30,6 + addu $18,$14,$2 + rotr $20,$30,11 + and $21,$30 + rotr $2,$30,25 + xor $19,$20 + rotr $20,$3,2 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $18,$21 + lw $21,24($6) # K[6] + xor $2,$20 + rotr $20,$3,22 + addu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + addu $18,$14,$2 # 6 + srl $2,$30,6 + xor $21,$31,$1 + sll $20,$30,7 + and $21,$30 + srl $19,$30,11 + xor $2,$20 + sll $20,$30,21 + xor $2,$19 + srl $19,$30,25 + xor $2,$20 + sll $20,$30,26 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + srl $2,$3,2 + addu $18,$21 + lw $21,24($6) # K[6] + sll $20,$3,10 + addu $18,$19 + srl $19,$3,13 + xor $2,$20 + sll $20,$3,19 + xor $2,$19 + srl $19,$3,22 + xor $2,$20 + sll $20,$3,30 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sw $14,24($29) # offload to ring buffer + addu $2,$19 + and $20,$3 + addu $18,$21 # +=K[6] + addu $2,$20 # +=Maj(a,b,c) + addu $25,$18 + addu $2,$18 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $16,32($5) +#else + lwl $16,35($5) + lwr $16,32($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $15,$15 # byte swap(7) + rotr $15,$15,16 +#else + srl $20,$15,24 # byte swap(7) + srl $21,$15,8 + andi $22,$15,0xFF00 + sll $15,$15,24 + andi $21,0xFF00 + sll $22,$22,8 + or $15,$20 + or $21,$22 + or $15,$21 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 7 + rotr $20,$25,6 + addu $19,$15,$1 + rotr $21,$25,11 + and $22,$25 + rotr $1,$25,25 + xor $20,$21 + rotr $21,$2,2 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $19,$22 + lw $22,28($6) # K[7] + xor $1,$21 + rotr $21,$2,22 + addu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + addu $19,$15,$1 # 7 + srl $1,$25,6 + xor $22,$30,$31 + sll $21,$25,7 + and $22,$25 + srl $20,$25,11 + xor $1,$21 + sll $21,$25,21 + xor $1,$20 + srl $20,$25,25 + xor $1,$21 + sll $21,$25,26 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + srl $1,$2,2 + addu $19,$22 + lw $22,28($6) # K[7] + sll $21,$2,10 + addu $19,$20 + srl $20,$2,13 + xor $1,$21 + sll $21,$2,19 + xor $1,$20 + srl $20,$2,22 + xor $1,$21 + sll $21,$2,30 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sw $15,28($29) # offload to ring buffer + addu $1,$20 + and $21,$2 + addu $19,$22 # +=K[7] + addu $1,$21 # +=Maj(a,b,c) + addu $24,$19 + addu $1,$19 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $17,36($5) +#else + lwl $17,39($5) + lwr $17,36($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $16,$16 # byte swap(8) + rotr $16,$16,16 +#else + srl $21,$16,24 # byte swap(8) + srl $22,$16,8 + andi $23,$16,0xFF00 + sll $16,$16,24 + andi $22,0xFF00 + sll $23,$23,8 + or $16,$21 + or $22,$23 + or $16,$22 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 8 + rotr $21,$24,6 + addu $20,$16,$31 + rotr $22,$24,11 + and $23,$24 + rotr $31,$24,25 + xor $21,$22 + rotr $22,$1,2 + xor $23,$30 # Ch(e,f,g) + xor 
$21,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $20,$23 + lw $23,32($6) # K[8] + xor $31,$22 + rotr $22,$1,22 + addu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + addu $20,$16,$31 # 8 + srl $31,$24,6 + xor $23,$25,$30 + sll $22,$24,7 + and $23,$24 + srl $21,$24,11 + xor $31,$22 + sll $22,$24,21 + xor $31,$21 + srl $21,$24,25 + xor $31,$22 + sll $22,$24,26 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + srl $31,$1,2 + addu $20,$23 + lw $23,32($6) # K[8] + sll $22,$1,10 + addu $20,$21 + srl $21,$1,13 + xor $31,$22 + sll $22,$1,19 + xor $31,$21 + srl $21,$1,22 + xor $31,$22 + sll $22,$1,30 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sw $16,32($29) # offload to ring buffer + addu $31,$21 + and $22,$1 + addu $20,$23 # +=K[8] + addu $31,$22 # +=Maj(a,b,c) + addu $7,$20 + addu $31,$20 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $18,40($5) +#else + lwl $18,43($5) + lwr $18,40($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $17,$17 # byte swap(9) + rotr $17,$17,16 +#else + srl $22,$17,24 # byte swap(9) + srl $23,$17,8 + andi $8,$17,0xFF00 + sll $17,$17,24 + andi $23,0xFF00 + sll $8,$8,8 + or $17,$22 + or $23,$8 + or $17,$23 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 9 + rotr $22,$7,6 + addu $21,$17,$30 + rotr $23,$7,11 + and $8,$7 + rotr $30,$7,25 + xor $22,$23 + rotr $23,$31,2 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + rotr $30,$31,13 + addu $21,$8 + lw $8,36($6) # K[9] + xor $30,$23 + rotr $23,$31,22 + addu $21,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + addu $21,$17,$30 # 9 + srl $30,$7,6 + xor $8,$24,$25 + sll $23,$7,7 + and $8,$7 + srl $22,$7,11 + xor $30,$23 + sll $23,$7,21 + xor $30,$22 + srl $22,$7,25 + xor $30,$23 + sll $23,$7,26 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + srl $30,$31,2 + addu $21,$8 + lw $8,36($6) # K[9] + sll $23,$31,10 + addu $21,$22 + srl $22,$31,13 + xor $30,$23 + sll $23,$31,19 + xor $30,$22 + srl $22,$31,22 + xor $30,$23 + sll $23,$31,30 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sw $17,36($29) # offload to ring buffer + addu $30,$22 + and $23,$31 + addu $21,$8 # +=K[9] + addu $30,$23 # +=Maj(a,b,c) + addu $3,$21 + addu $30,$21 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $19,44($5) +#else + lwl $19,47($5) + lwr $19,44($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $18,$18 # byte swap(10) + rotr $18,$18,16 +#else + srl $23,$18,24 # byte swap(10) + srl $8,$18,8 + andi $9,$18,0xFF00 + sll $18,$18,24 + andi $8,0xFF00 + sll $9,$9,8 + or $18,$23 + or $8,$9 + or $18,$8 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 10 + rotr $23,$3,6 + addu $22,$18,$25 + rotr $8,$3,11 + and $9,$3 + rotr $25,$3,25 + xor $23,$8 + rotr $8,$30,2 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $22,$9 + lw $9,40($6) # K[10] + xor $25,$8 + rotr $8,$30,22 + addu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + addu $22,$18,$25 # 10 + srl $25,$3,6 + xor $9,$7,$24 + sll $8,$3,7 + and $9,$3 + srl $23,$3,11 + xor $25,$8 + sll $8,$3,21 + xor $25,$23 + srl $23,$3,25 + xor $25,$8 + sll $8,$3,26 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + srl $25,$30,2 + addu $22,$9 + lw $9,40($6) # K[10] + sll $8,$30,10 + 
addu $22,$23 + srl $23,$30,13 + xor $25,$8 + sll $8,$30,19 + xor $25,$23 + srl $23,$30,22 + xor $25,$8 + sll $8,$30,30 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sw $18,40($29) # offload to ring buffer + addu $25,$23 + and $8,$30 + addu $22,$9 # +=K[10] + addu $25,$8 # +=Maj(a,b,c) + addu $2,$22 + addu $25,$22 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $20,48($5) +#else + lwl $20,51($5) + lwr $20,48($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $19,$19 # byte swap(11) + rotr $19,$19,16 +#else + srl $8,$19,24 # byte swap(11) + srl $9,$19,8 + andi $10,$19,0xFF00 + sll $19,$19,24 + andi $9,0xFF00 + sll $10,$10,8 + or $19,$8 + or $9,$10 + or $19,$9 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 11 + rotr $8,$2,6 + addu $23,$19,$24 + rotr $9,$2,11 + and $10,$2 + rotr $24,$2,25 + xor $8,$9 + rotr $9,$25,2 + xor $10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $23,$10 + lw $10,44($6) # K[11] + xor $24,$9 + rotr $9,$25,22 + addu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + addu $23,$19,$24 # 11 + srl $24,$2,6 + xor $10,$3,$7 + sll $9,$2,7 + and $10,$2 + srl $8,$2,11 + xor $24,$9 + sll $9,$2,21 + xor $24,$8 + srl $8,$2,25 + xor $24,$9 + sll $9,$2,26 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + srl $24,$25,2 + addu $23,$10 + lw $10,44($6) # K[11] + sll $9,$25,10 + addu $23,$8 + srl $8,$25,13 + xor $24,$9 + sll $9,$25,19 + xor $24,$8 + srl $8,$25,22 + xor $24,$9 + sll $9,$25,30 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sw $19,44($29) # offload to ring buffer + addu $24,$8 + and $9,$25 + addu $23,$10 # +=K[11] + addu $24,$9 # +=Maj(a,b,c) + addu $1,$23 + addu $24,$23 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $21,52($5) +#else + lwl $21,55($5) + lwr $21,52($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $20,$20 # byte swap(12) + rotr $20,$20,16 +#else + srl $9,$20,24 # byte swap(12) + srl $10,$20,8 + andi $11,$20,0xFF00 + sll $20,$20,24 + andi $10,0xFF00 + sll $11,$11,8 + or $20,$9 + or $10,$11 + or $20,$10 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 12 + rotr $9,$1,6 + addu $8,$20,$7 + rotr $10,$1,11 + and $11,$1 + rotr $7,$1,25 + xor $9,$10 + rotr $10,$24,2 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $8,$11 + lw $11,48($6) # K[12] + xor $7,$10 + rotr $10,$24,22 + addu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + addu $8,$20,$7 # 12 + srl $7,$1,6 + xor $11,$2,$3 + sll $10,$1,7 + and $11,$1 + srl $9,$1,11 + xor $7,$10 + sll $10,$1,21 + xor $7,$9 + srl $9,$1,25 + xor $7,$10 + sll $10,$1,26 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + srl $7,$24,2 + addu $8,$11 + lw $11,48($6) # K[12] + sll $10,$24,10 + addu $8,$9 + srl $9,$24,13 + xor $7,$10 + sll $10,$24,19 + xor $7,$9 + srl $9,$24,22 + xor $7,$10 + sll $10,$24,30 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sw $20,48($29) # offload to ring buffer + addu $7,$9 + and $10,$24 + addu $8,$11 # +=K[12] + addu $7,$10 # +=Maj(a,b,c) + addu $31,$8 + addu $7,$8 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $22,56($5) +#else + lwl $22,59($5) + lwr $22,56($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh 
$21,$21 # byte swap(13) + rotr $21,$21,16 +#else + srl $10,$21,24 # byte swap(13) + srl $11,$21,8 + andi $12,$21,0xFF00 + sll $21,$21,24 + andi $11,0xFF00 + sll $12,$12,8 + or $21,$10 + or $11,$12 + or $21,$11 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 13 + rotr $10,$31,6 + addu $9,$21,$3 + rotr $11,$31,11 + and $12,$31 + rotr $3,$31,25 + xor $10,$11 + rotr $11,$7,2 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $9,$12 + lw $12,52($6) # K[13] + xor $3,$11 + rotr $11,$7,22 + addu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + addu $9,$21,$3 # 13 + srl $3,$31,6 + xor $12,$1,$2 + sll $11,$31,7 + and $12,$31 + srl $10,$31,11 + xor $3,$11 + sll $11,$31,21 + xor $3,$10 + srl $10,$31,25 + xor $3,$11 + sll $11,$31,26 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + srl $3,$7,2 + addu $9,$12 + lw $12,52($6) # K[13] + sll $11,$7,10 + addu $9,$10 + srl $10,$7,13 + xor $3,$11 + sll $11,$7,19 + xor $3,$10 + srl $10,$7,22 + xor $3,$11 + sll $11,$7,30 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sw $21,52($29) # offload to ring buffer + addu $3,$10 + and $11,$7 + addu $9,$12 # +=K[13] + addu $3,$11 # +=Maj(a,b,c) + addu $30,$9 + addu $3,$9 + lw $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $23,60($5) +#else + lwl $23,63($5) + lwr $23,60($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $22,$22 # byte swap(14) + rotr $22,$22,16 +#else + srl $11,$22,24 # byte swap(14) + srl $12,$22,8 + andi $13,$22,0xFF00 + sll $22,$22,24 + andi $12,0xFF00 + sll $13,$13,8 + or $22,$11 + or $12,$13 + or $22,$12 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 14 + rotr $11,$30,6 + addu $10,$22,$2 + rotr $12,$30,11 + and $13,$30 + rotr $2,$30,25 + xor $11,$12 + rotr $12,$3,2 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $10,$13 + lw $13,56($6) # K[14] + xor $2,$12 + rotr $12,$3,22 + addu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + addu $10,$22,$2 # 14 + srl $2,$30,6 + xor $13,$31,$1 + sll $12,$30,7 + and $13,$30 + srl $11,$30,11 + xor $2,$12 + sll $12,$30,21 + xor $2,$11 + srl $11,$30,25 + xor $2,$12 + sll $12,$30,26 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + srl $2,$3,2 + addu $10,$13 + lw $13,56($6) # K[14] + sll $12,$3,10 + addu $10,$11 + srl $11,$3,13 + xor $2,$12 + sll $12,$3,19 + xor $2,$11 + srl $11,$3,22 + xor $2,$12 + sll $12,$3,30 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sw $22,56($29) # offload to ring buffer + addu $2,$11 + and $12,$3 + addu $10,$13 # +=K[14] + addu $2,$12 # +=Maj(a,b,c) + addu $25,$10 + addu $2,$10 + lw $9,4($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $23,$23 # byte swap(15) + rotr $23,$23,16 +#else + srl $12,$23,24 # byte swap(15) + srl $13,$23,8 + andi $14,$23,0xFF00 + sll $23,$23,24 + andi $13,0xFF00 + sll $14,$14,8 + or $23,$12 + or $13,$14 + or $23,$13 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 15 + rotr $12,$25,6 + addu $11,$23,$1 + rotr $13,$25,11 + and $14,$25 + rotr $1,$25,25 + xor $12,$13 + rotr $13,$2,2 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $11,$14 + lw $14,60($6) # K[15] + xor $1,$13 + rotr $13,$2,22 + 
addu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#else + addu $11,$23,$1 # 15 + srl $1,$25,6 + xor $14,$30,$31 + sll $13,$25,7 + and $14,$25 + srl $12,$25,11 + xor $1,$13 + sll $13,$25,21 + xor $1,$12 + srl $12,$25,25 + xor $1,$13 + sll $13,$25,26 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + srl $1,$2,2 + addu $11,$14 + lw $14,60($6) # K[15] + sll $13,$2,10 + addu $11,$12 + srl $12,$2,13 + xor $1,$13 + sll $13,$2,19 + xor $1,$12 + srl $12,$2,22 + xor $1,$13 + sll $13,$2,30 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#endif + sw $23,60($29) # offload to ring buffer + addu $1,$12 + and $13,$2 + addu $11,$14 # +=K[15] + addu $1,$13 # +=Maj(a,b,c) + addu $24,$11 + addu $1,$11 + lw $10,8($29) # prefetch from ring buffer + b .L16_xx +.align 4 +.L16_xx: +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $14,$9,3 # Xupdate(16) + rotr $12,$9,7 + addu $8,$17 # +=X[i+9] + xor $14,$12 + rotr $12,$9,18 + + srl $15,$22,10 + rotr $13,$22,17 + xor $14,$12 # sigma0(X[i+1]) + rotr $12,$22,19 + xor $15,$13 + addu $8,$14 +#else + srl $14,$9,3 # Xupdate(16) + addu $8,$17 # +=X[i+9] + sll $13,$9,14 + srl $12,$9,7 + xor $14,$13 + sll $13,11 + xor $14,$12 + srl $12,$9,18 + xor $14,$13 + + srl $15,$22,10 + xor $14,$12 # sigma0(X[i+1]) + sll $13,$22,13 + addu $8,$14 + srl $12,$22,17 + xor $15,$13 + sll $13,2 + xor $15,$12 + srl $12,$22,19 + xor $15,$13 +#endif + xor $15,$12 # sigma1(X[i+14]) + addu $8,$15 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 16 + rotr $13,$24,6 + addu $12,$8,$31 + rotr $14,$24,11 + and $15,$24 + rotr $31,$24,25 + xor $13,$14 + rotr $14,$1,2 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $12,$15 + lw $15,64($6) # K[16] + xor $31,$14 + rotr $14,$1,22 + addu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#else + addu $12,$8,$31 # 16 + srl $31,$24,6 + xor $15,$25,$30 + sll $14,$24,7 + and $15,$24 + srl $13,$24,11 + xor $31,$14 + sll $14,$24,21 + xor $31,$13 + srl $13,$24,25 + xor $31,$14 + sll $14,$24,26 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + srl $31,$1,2 + addu $12,$15 + lw $15,64($6) # K[16] + sll $14,$1,10 + addu $12,$13 + srl $13,$1,13 + xor $31,$14 + sll $14,$1,19 + xor $31,$13 + srl $13,$1,22 + xor $31,$14 + sll $14,$1,30 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#endif + sw $8,0($29) # offload to ring buffer + addu $31,$13 + and $14,$1 + addu $12,$15 # +=K[16] + addu $31,$14 # +=Maj(a,b,c) + addu $7,$12 + addu $31,$12 + lw $11,12($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $15,$10,3 # Xupdate(17) + rotr $13,$10,7 + addu $9,$18 # +=X[i+9] + xor $15,$13 + rotr $13,$10,18 + + srl $16,$23,10 + rotr $14,$23,17 + xor $15,$13 # sigma0(X[i+1]) + rotr $13,$23,19 + xor $16,$14 + addu $9,$15 +#else + srl $15,$10,3 # Xupdate(17) + addu $9,$18 # +=X[i+9] + sll $14,$10,14 + srl $13,$10,7 + xor $15,$14 + sll $14,11 + xor $15,$13 + srl $13,$10,18 + xor $15,$14 + + srl $16,$23,10 + xor $15,$13 # sigma0(X[i+1]) + sll $14,$23,13 + addu $9,$15 + srl $13,$23,17 + xor $16,$14 + sll $14,2 + xor $16,$13 + srl $13,$23,19 + xor $16,$14 +#endif + xor $16,$13 # sigma1(X[i+14]) + addu $9,$16 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 17 + rotr $14,$7,6 + addu $13,$9,$30 + rotr $15,$7,11 + and $16,$7 + rotr $30,$7,25 + xor $14,$15 + rotr $15,$31,2 + xor $16,$25 # 
Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + rotr $30,$31,13 + addu $13,$16 + lw $16,68($6) # K[17] + xor $30,$15 + rotr $15,$31,22 + addu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + addu $13,$9,$30 # 17 + srl $30,$7,6 + xor $16,$24,$25 + sll $15,$7,7 + and $16,$7 + srl $14,$7,11 + xor $30,$15 + sll $15,$7,21 + xor $30,$14 + srl $14,$7,25 + xor $30,$15 + sll $15,$7,26 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + srl $30,$31,2 + addu $13,$16 + lw $16,68($6) # K[17] + sll $15,$31,10 + addu $13,$14 + srl $14,$31,13 + xor $30,$15 + sll $15,$31,19 + xor $30,$14 + srl $14,$31,22 + xor $30,$15 + sll $15,$31,30 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sw $9,4($29) # offload to ring buffer + addu $30,$14 + and $15,$31 + addu $13,$16 # +=K[17] + addu $30,$15 # +=Maj(a,b,c) + addu $3,$13 + addu $30,$13 + lw $12,16($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $16,$11,3 # Xupdate(18) + rotr $14,$11,7 + addu $10,$19 # +=X[i+9] + xor $16,$14 + rotr $14,$11,18 + + srl $17,$8,10 + rotr $15,$8,17 + xor $16,$14 # sigma0(X[i+1]) + rotr $14,$8,19 + xor $17,$15 + addu $10,$16 +#else + srl $16,$11,3 # Xupdate(18) + addu $10,$19 # +=X[i+9] + sll $15,$11,14 + srl $14,$11,7 + xor $16,$15 + sll $15,11 + xor $16,$14 + srl $14,$11,18 + xor $16,$15 + + srl $17,$8,10 + xor $16,$14 # sigma0(X[i+1]) + sll $15,$8,13 + addu $10,$16 + srl $14,$8,17 + xor $17,$15 + sll $15,2 + xor $17,$14 + srl $14,$8,19 + xor $17,$15 +#endif + xor $17,$14 # sigma1(X[i+14]) + addu $10,$17 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 18 + rotr $15,$3,6 + addu $14,$10,$25 + rotr $16,$3,11 + and $17,$3 + rotr $25,$3,25 + xor $15,$16 + rotr $16,$30,2 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $14,$17 + lw $17,72($6) # K[18] + xor $25,$16 + rotr $16,$30,22 + addu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + addu $14,$10,$25 # 18 + srl $25,$3,6 + xor $17,$7,$24 + sll $16,$3,7 + and $17,$3 + srl $15,$3,11 + xor $25,$16 + sll $16,$3,21 + xor $25,$15 + srl $15,$3,25 + xor $25,$16 + sll $16,$3,26 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + srl $25,$30,2 + addu $14,$17 + lw $17,72($6) # K[18] + sll $16,$30,10 + addu $14,$15 + srl $15,$30,13 + xor $25,$16 + sll $16,$30,19 + xor $25,$15 + srl $15,$30,22 + xor $25,$16 + sll $16,$30,30 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sw $10,8($29) # offload to ring buffer + addu $25,$15 + and $16,$30 + addu $14,$17 # +=K[18] + addu $25,$16 # +=Maj(a,b,c) + addu $2,$14 + addu $25,$14 + lw $13,20($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $17,$12,3 # Xupdate(19) + rotr $15,$12,7 + addu $11,$20 # +=X[i+9] + xor $17,$15 + rotr $15,$12,18 + + srl $18,$9,10 + rotr $16,$9,17 + xor $17,$15 # sigma0(X[i+1]) + rotr $15,$9,19 + xor $18,$16 + addu $11,$17 +#else + srl $17,$12,3 # Xupdate(19) + addu $11,$20 # +=X[i+9] + sll $16,$12,14 + srl $15,$12,7 + xor $17,$16 + sll $16,11 + xor $17,$15 + srl $15,$12,18 + xor $17,$16 + + srl $18,$9,10 + xor $17,$15 # sigma0(X[i+1]) + sll $16,$9,13 + addu $11,$17 + srl $15,$9,17 + xor $18,$16 + sll $16,2 + xor $18,$15 + srl $15,$9,19 + xor $18,$16 +#endif + xor $18,$15 # sigma1(X[i+14]) + addu $11,$18 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 19 + rotr 
$16,$2,6 + addu $15,$11,$24 + rotr $17,$2,11 + and $18,$2 + rotr $24,$2,25 + xor $16,$17 + rotr $17,$25,2 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $15,$18 + lw $18,76($6) # K[19] + xor $24,$17 + rotr $17,$25,22 + addu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + addu $15,$11,$24 # 19 + srl $24,$2,6 + xor $18,$3,$7 + sll $17,$2,7 + and $18,$2 + srl $16,$2,11 + xor $24,$17 + sll $17,$2,21 + xor $24,$16 + srl $16,$2,25 + xor $24,$17 + sll $17,$2,26 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + srl $24,$25,2 + addu $15,$18 + lw $18,76($6) # K[19] + sll $17,$25,10 + addu $15,$16 + srl $16,$25,13 + xor $24,$17 + sll $17,$25,19 + xor $24,$16 + srl $16,$25,22 + xor $24,$17 + sll $17,$25,30 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sw $11,12($29) # offload to ring buffer + addu $24,$16 + and $17,$25 + addu $15,$18 # +=K[19] + addu $24,$17 # +=Maj(a,b,c) + addu $1,$15 + addu $24,$15 + lw $14,24($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $18,$13,3 # Xupdate(20) + rotr $16,$13,7 + addu $12,$21 # +=X[i+9] + xor $18,$16 + rotr $16,$13,18 + + srl $19,$10,10 + rotr $17,$10,17 + xor $18,$16 # sigma0(X[i+1]) + rotr $16,$10,19 + xor $19,$17 + addu $12,$18 +#else + srl $18,$13,3 # Xupdate(20) + addu $12,$21 # +=X[i+9] + sll $17,$13,14 + srl $16,$13,7 + xor $18,$17 + sll $17,11 + xor $18,$16 + srl $16,$13,18 + xor $18,$17 + + srl $19,$10,10 + xor $18,$16 # sigma0(X[i+1]) + sll $17,$10,13 + addu $12,$18 + srl $16,$10,17 + xor $19,$17 + sll $17,2 + xor $19,$16 + srl $16,$10,19 + xor $19,$17 +#endif + xor $19,$16 # sigma1(X[i+14]) + addu $12,$19 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 20 + rotr $17,$1,6 + addu $16,$12,$7 + rotr $18,$1,11 + and $19,$1 + rotr $7,$1,25 + xor $17,$18 + rotr $18,$24,2 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $16,$19 + lw $19,80($6) # K[20] + xor $7,$18 + rotr $18,$24,22 + addu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + addu $16,$12,$7 # 20 + srl $7,$1,6 + xor $19,$2,$3 + sll $18,$1,7 + and $19,$1 + srl $17,$1,11 + xor $7,$18 + sll $18,$1,21 + xor $7,$17 + srl $17,$1,25 + xor $7,$18 + sll $18,$1,26 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + srl $7,$24,2 + addu $16,$19 + lw $19,80($6) # K[20] + sll $18,$24,10 + addu $16,$17 + srl $17,$24,13 + xor $7,$18 + sll $18,$24,19 + xor $7,$17 + srl $17,$24,22 + xor $7,$18 + sll $18,$24,30 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sw $12,16($29) # offload to ring buffer + addu $7,$17 + and $18,$24 + addu $16,$19 # +=K[20] + addu $7,$18 # +=Maj(a,b,c) + addu $31,$16 + addu $7,$16 + lw $15,28($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $19,$14,3 # Xupdate(21) + rotr $17,$14,7 + addu $13,$22 # +=X[i+9] + xor $19,$17 + rotr $17,$14,18 + + srl $20,$11,10 + rotr $18,$11,17 + xor $19,$17 # sigma0(X[i+1]) + rotr $17,$11,19 + xor $20,$18 + addu $13,$19 +#else + srl $19,$14,3 # Xupdate(21) + addu $13,$22 # +=X[i+9] + sll $18,$14,14 + srl $17,$14,7 + xor $19,$18 + sll $18,11 + xor $19,$17 + srl $17,$14,18 + xor $19,$18 + + srl $20,$11,10 + xor $19,$17 # sigma0(X[i+1]) + sll $18,$11,13 + addu $13,$19 + srl $17,$11,17 + xor $20,$18 + sll $18,2 + xor $20,$17 + srl $17,$11,19 + xor $20,$18 +#endif + xor $20,$17 # 
sigma1(X[i+14]) + addu $13,$20 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 21 + rotr $18,$31,6 + addu $17,$13,$3 + rotr $19,$31,11 + and $20,$31 + rotr $3,$31,25 + xor $18,$19 + rotr $19,$7,2 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $17,$20 + lw $20,84($6) # K[21] + xor $3,$19 + rotr $19,$7,22 + addu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + addu $17,$13,$3 # 21 + srl $3,$31,6 + xor $20,$1,$2 + sll $19,$31,7 + and $20,$31 + srl $18,$31,11 + xor $3,$19 + sll $19,$31,21 + xor $3,$18 + srl $18,$31,25 + xor $3,$19 + sll $19,$31,26 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + srl $3,$7,2 + addu $17,$20 + lw $20,84($6) # K[21] + sll $19,$7,10 + addu $17,$18 + srl $18,$7,13 + xor $3,$19 + sll $19,$7,19 + xor $3,$18 + srl $18,$7,22 + xor $3,$19 + sll $19,$7,30 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sw $13,20($29) # offload to ring buffer + addu $3,$18 + and $19,$7 + addu $17,$20 # +=K[21] + addu $3,$19 # +=Maj(a,b,c) + addu $30,$17 + addu $3,$17 + lw $16,32($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $20,$15,3 # Xupdate(22) + rotr $18,$15,7 + addu $14,$23 # +=X[i+9] + xor $20,$18 + rotr $18,$15,18 + + srl $21,$12,10 + rotr $19,$12,17 + xor $20,$18 # sigma0(X[i+1]) + rotr $18,$12,19 + xor $21,$19 + addu $14,$20 +#else + srl $20,$15,3 # Xupdate(22) + addu $14,$23 # +=X[i+9] + sll $19,$15,14 + srl $18,$15,7 + xor $20,$19 + sll $19,11 + xor $20,$18 + srl $18,$15,18 + xor $20,$19 + + srl $21,$12,10 + xor $20,$18 # sigma0(X[i+1]) + sll $19,$12,13 + addu $14,$20 + srl $18,$12,17 + xor $21,$19 + sll $19,2 + xor $21,$18 + srl $18,$12,19 + xor $21,$19 +#endif + xor $21,$18 # sigma1(X[i+14]) + addu $14,$21 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 22 + rotr $19,$30,6 + addu $18,$14,$2 + rotr $20,$30,11 + and $21,$30 + rotr $2,$30,25 + xor $19,$20 + rotr $20,$3,2 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $18,$21 + lw $21,88($6) # K[22] + xor $2,$20 + rotr $20,$3,22 + addu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + addu $18,$14,$2 # 22 + srl $2,$30,6 + xor $21,$31,$1 + sll $20,$30,7 + and $21,$30 + srl $19,$30,11 + xor $2,$20 + sll $20,$30,21 + xor $2,$19 + srl $19,$30,25 + xor $2,$20 + sll $20,$30,26 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + srl $2,$3,2 + addu $18,$21 + lw $21,88($6) # K[22] + sll $20,$3,10 + addu $18,$19 + srl $19,$3,13 + xor $2,$20 + sll $20,$3,19 + xor $2,$19 + srl $19,$3,22 + xor $2,$20 + sll $20,$3,30 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sw $14,24($29) # offload to ring buffer + addu $2,$19 + and $20,$3 + addu $18,$21 # +=K[22] + addu $2,$20 # +=Maj(a,b,c) + addu $25,$18 + addu $2,$18 + lw $17,36($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $21,$16,3 # Xupdate(23) + rotr $19,$16,7 + addu $15,$8 # +=X[i+9] + xor $21,$19 + rotr $19,$16,18 + + srl $22,$13,10 + rotr $20,$13,17 + xor $21,$19 # sigma0(X[i+1]) + rotr $19,$13,19 + xor $22,$20 + addu $15,$21 +#else + srl $21,$16,3 # Xupdate(23) + addu $15,$8 # +=X[i+9] + sll $20,$16,14 + srl $19,$16,7 + xor $21,$20 + sll $20,11 + xor $21,$19 + srl $19,$16,18 + xor $21,$20 + + srl $22,$13,10 + xor $21,$19 # sigma0(X[i+1]) + sll $20,$13,13 + addu $15,$21 + srl $19,$13,17 + 
xor $22,$20 + sll $20,2 + xor $22,$19 + srl $19,$13,19 + xor $22,$20 +#endif + xor $22,$19 # sigma1(X[i+14]) + addu $15,$22 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 23 + rotr $20,$25,6 + addu $19,$15,$1 + rotr $21,$25,11 + and $22,$25 + rotr $1,$25,25 + xor $20,$21 + rotr $21,$2,2 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $19,$22 + lw $22,92($6) # K[23] + xor $1,$21 + rotr $21,$2,22 + addu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + addu $19,$15,$1 # 23 + srl $1,$25,6 + xor $22,$30,$31 + sll $21,$25,7 + and $22,$25 + srl $20,$25,11 + xor $1,$21 + sll $21,$25,21 + xor $1,$20 + srl $20,$25,25 + xor $1,$21 + sll $21,$25,26 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + srl $1,$2,2 + addu $19,$22 + lw $22,92($6) # K[23] + sll $21,$2,10 + addu $19,$20 + srl $20,$2,13 + xor $1,$21 + sll $21,$2,19 + xor $1,$20 + srl $20,$2,22 + xor $1,$21 + sll $21,$2,30 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sw $15,28($29) # offload to ring buffer + addu $1,$20 + and $21,$2 + addu $19,$22 # +=K[23] + addu $1,$21 # +=Maj(a,b,c) + addu $24,$19 + addu $1,$19 + lw $18,40($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $22,$17,3 # Xupdate(24) + rotr $20,$17,7 + addu $16,$9 # +=X[i+9] + xor $22,$20 + rotr $20,$17,18 + + srl $23,$14,10 + rotr $21,$14,17 + xor $22,$20 # sigma0(X[i+1]) + rotr $20,$14,19 + xor $23,$21 + addu $16,$22 +#else + srl $22,$17,3 # Xupdate(24) + addu $16,$9 # +=X[i+9] + sll $21,$17,14 + srl $20,$17,7 + xor $22,$21 + sll $21,11 + xor $22,$20 + srl $20,$17,18 + xor $22,$21 + + srl $23,$14,10 + xor $22,$20 # sigma0(X[i+1]) + sll $21,$14,13 + addu $16,$22 + srl $20,$14,17 + xor $23,$21 + sll $21,2 + xor $23,$20 + srl $20,$14,19 + xor $23,$21 +#endif + xor $23,$20 # sigma1(X[i+14]) + addu $16,$23 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 24 + rotr $21,$24,6 + addu $20,$16,$31 + rotr $22,$24,11 + and $23,$24 + rotr $31,$24,25 + xor $21,$22 + rotr $22,$1,2 + xor $23,$30 # Ch(e,f,g) + xor $21,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $20,$23 + lw $23,96($6) # K[24] + xor $31,$22 + rotr $22,$1,22 + addu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + addu $20,$16,$31 # 24 + srl $31,$24,6 + xor $23,$25,$30 + sll $22,$24,7 + and $23,$24 + srl $21,$24,11 + xor $31,$22 + sll $22,$24,21 + xor $31,$21 + srl $21,$24,25 + xor $31,$22 + sll $22,$24,26 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + srl $31,$1,2 + addu $20,$23 + lw $23,96($6) # K[24] + sll $22,$1,10 + addu $20,$21 + srl $21,$1,13 + xor $31,$22 + sll $22,$1,19 + xor $31,$21 + srl $21,$1,22 + xor $31,$22 + sll $22,$1,30 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sw $16,32($29) # offload to ring buffer + addu $31,$21 + and $22,$1 + addu $20,$23 # +=K[24] + addu $31,$22 # +=Maj(a,b,c) + addu $7,$20 + addu $31,$20 + lw $19,44($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $23,$18,3 # Xupdate(25) + rotr $21,$18,7 + addu $17,$10 # +=X[i+9] + xor $23,$21 + rotr $21,$18,18 + + srl $8,$15,10 + rotr $22,$15,17 + xor $23,$21 # sigma0(X[i+1]) + rotr $21,$15,19 + xor $8,$22 + addu $17,$23 +#else + srl $23,$18,3 # Xupdate(25) + addu $17,$10 # +=X[i+9] + sll $22,$18,14 + srl $21,$18,7 + xor $23,$22 + sll $22,11 + xor $23,$21 + srl $21,$18,18 + 
xor $23,$22 + + srl $8,$15,10 + xor $23,$21 # sigma0(X[i+1]) + sll $22,$15,13 + addu $17,$23 + srl $21,$15,17 + xor $8,$22 + sll $22,2 + xor $8,$21 + srl $21,$15,19 + xor $8,$22 +#endif + xor $8,$21 # sigma1(X[i+14]) + addu $17,$8 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 25 + rotr $22,$7,6 + addu $21,$17,$30 + rotr $23,$7,11 + and $8,$7 + rotr $30,$7,25 + xor $22,$23 + rotr $23,$31,2 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + rotr $30,$31,13 + addu $21,$8 + lw $8,100($6) # K[25] + xor $30,$23 + rotr $23,$31,22 + addu $21,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + addu $21,$17,$30 # 25 + srl $30,$7,6 + xor $8,$24,$25 + sll $23,$7,7 + and $8,$7 + srl $22,$7,11 + xor $30,$23 + sll $23,$7,21 + xor $30,$22 + srl $22,$7,25 + xor $30,$23 + sll $23,$7,26 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + srl $30,$31,2 + addu $21,$8 + lw $8,100($6) # K[25] + sll $23,$31,10 + addu $21,$22 + srl $22,$31,13 + xor $30,$23 + sll $23,$31,19 + xor $30,$22 + srl $22,$31,22 + xor $30,$23 + sll $23,$31,30 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sw $17,36($29) # offload to ring buffer + addu $30,$22 + and $23,$31 + addu $21,$8 # +=K[25] + addu $30,$23 # +=Maj(a,b,c) + addu $3,$21 + addu $30,$21 + lw $20,48($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $8,$19,3 # Xupdate(26) + rotr $22,$19,7 + addu $18,$11 # +=X[i+9] + xor $8,$22 + rotr $22,$19,18 + + srl $9,$16,10 + rotr $23,$16,17 + xor $8,$22 # sigma0(X[i+1]) + rotr $22,$16,19 + xor $9,$23 + addu $18,$8 +#else + srl $8,$19,3 # Xupdate(26) + addu $18,$11 # +=X[i+9] + sll $23,$19,14 + srl $22,$19,7 + xor $8,$23 + sll $23,11 + xor $8,$22 + srl $22,$19,18 + xor $8,$23 + + srl $9,$16,10 + xor $8,$22 # sigma0(X[i+1]) + sll $23,$16,13 + addu $18,$8 + srl $22,$16,17 + xor $9,$23 + sll $23,2 + xor $9,$22 + srl $22,$16,19 + xor $9,$23 +#endif + xor $9,$22 # sigma1(X[i+14]) + addu $18,$9 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 26 + rotr $23,$3,6 + addu $22,$18,$25 + rotr $8,$3,11 + and $9,$3 + rotr $25,$3,25 + xor $23,$8 + rotr $8,$30,2 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $22,$9 + lw $9,104($6) # K[26] + xor $25,$8 + rotr $8,$30,22 + addu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + addu $22,$18,$25 # 26 + srl $25,$3,6 + xor $9,$7,$24 + sll $8,$3,7 + and $9,$3 + srl $23,$3,11 + xor $25,$8 + sll $8,$3,21 + xor $25,$23 + srl $23,$3,25 + xor $25,$8 + sll $8,$3,26 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + srl $25,$30,2 + addu $22,$9 + lw $9,104($6) # K[26] + sll $8,$30,10 + addu $22,$23 + srl $23,$30,13 + xor $25,$8 + sll $8,$30,19 + xor $25,$23 + srl $23,$30,22 + xor $25,$8 + sll $8,$30,30 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sw $18,40($29) # offload to ring buffer + addu $25,$23 + and $8,$30 + addu $22,$9 # +=K[26] + addu $25,$8 # +=Maj(a,b,c) + addu $2,$22 + addu $25,$22 + lw $21,52($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $9,$20,3 # Xupdate(27) + rotr $23,$20,7 + addu $19,$12 # +=X[i+9] + xor $9,$23 + rotr $23,$20,18 + + srl $10,$17,10 + rotr $8,$17,17 + xor $9,$23 # sigma0(X[i+1]) + rotr $23,$17,19 + xor $10,$8 + addu $19,$9 +#else + srl $9,$20,3 # Xupdate(27) + addu $19,$12 # +=X[i+9] + sll $8,$20,14 + srl 
$23,$20,7 + xor $9,$8 + sll $8,11 + xor $9,$23 + srl $23,$20,18 + xor $9,$8 + + srl $10,$17,10 + xor $9,$23 # sigma0(X[i+1]) + sll $8,$17,13 + addu $19,$9 + srl $23,$17,17 + xor $10,$8 + sll $8,2 + xor $10,$23 + srl $23,$17,19 + xor $10,$8 +#endif + xor $10,$23 # sigma1(X[i+14]) + addu $19,$10 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 27 + rotr $8,$2,6 + addu $23,$19,$24 + rotr $9,$2,11 + and $10,$2 + rotr $24,$2,25 + xor $8,$9 + rotr $9,$25,2 + xor $10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $23,$10 + lw $10,108($6) # K[27] + xor $24,$9 + rotr $9,$25,22 + addu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + addu $23,$19,$24 # 27 + srl $24,$2,6 + xor $10,$3,$7 + sll $9,$2,7 + and $10,$2 + srl $8,$2,11 + xor $24,$9 + sll $9,$2,21 + xor $24,$8 + srl $8,$2,25 + xor $24,$9 + sll $9,$2,26 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + srl $24,$25,2 + addu $23,$10 + lw $10,108($6) # K[27] + sll $9,$25,10 + addu $23,$8 + srl $8,$25,13 + xor $24,$9 + sll $9,$25,19 + xor $24,$8 + srl $8,$25,22 + xor $24,$9 + sll $9,$25,30 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sw $19,44($29) # offload to ring buffer + addu $24,$8 + and $9,$25 + addu $23,$10 # +=K[27] + addu $24,$9 # +=Maj(a,b,c) + addu $1,$23 + addu $24,$23 + lw $22,56($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $10,$21,3 # Xupdate(28) + rotr $8,$21,7 + addu $20,$13 # +=X[i+9] + xor $10,$8 + rotr $8,$21,18 + + srl $11,$18,10 + rotr $9,$18,17 + xor $10,$8 # sigma0(X[i+1]) + rotr $8,$18,19 + xor $11,$9 + addu $20,$10 +#else + srl $10,$21,3 # Xupdate(28) + addu $20,$13 # +=X[i+9] + sll $9,$21,14 + srl $8,$21,7 + xor $10,$9 + sll $9,11 + xor $10,$8 + srl $8,$21,18 + xor $10,$9 + + srl $11,$18,10 + xor $10,$8 # sigma0(X[i+1]) + sll $9,$18,13 + addu $20,$10 + srl $8,$18,17 + xor $11,$9 + sll $9,2 + xor $11,$8 + srl $8,$18,19 + xor $11,$9 +#endif + xor $11,$8 # sigma1(X[i+14]) + addu $20,$11 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 28 + rotr $9,$1,6 + addu $8,$20,$7 + rotr $10,$1,11 + and $11,$1 + rotr $7,$1,25 + xor $9,$10 + rotr $10,$24,2 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $8,$11 + lw $11,112($6) # K[28] + xor $7,$10 + rotr $10,$24,22 + addu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + addu $8,$20,$7 # 28 + srl $7,$1,6 + xor $11,$2,$3 + sll $10,$1,7 + and $11,$1 + srl $9,$1,11 + xor $7,$10 + sll $10,$1,21 + xor $7,$9 + srl $9,$1,25 + xor $7,$10 + sll $10,$1,26 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + srl $7,$24,2 + addu $8,$11 + lw $11,112($6) # K[28] + sll $10,$24,10 + addu $8,$9 + srl $9,$24,13 + xor $7,$10 + sll $10,$24,19 + xor $7,$9 + srl $9,$24,22 + xor $7,$10 + sll $10,$24,30 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sw $20,48($29) # offload to ring buffer + addu $7,$9 + and $10,$24 + addu $8,$11 # +=K[28] + addu $7,$10 # +=Maj(a,b,c) + addu $31,$8 + addu $7,$8 + lw $23,60($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $11,$22,3 # Xupdate(29) + rotr $9,$22,7 + addu $21,$14 # +=X[i+9] + xor $11,$9 + rotr $9,$22,18 + + srl $12,$19,10 + rotr $10,$19,17 + xor $11,$9 # sigma0(X[i+1]) + rotr $9,$19,19 + xor $12,$10 + addu $21,$11 +#else + srl $11,$22,3 # Xupdate(29) + addu $21,$14 # +=X[i+9] + sll 
$10,$22,14 + srl $9,$22,7 + xor $11,$10 + sll $10,11 + xor $11,$9 + srl $9,$22,18 + xor $11,$10 + + srl $12,$19,10 + xor $11,$9 # sigma0(X[i+1]) + sll $10,$19,13 + addu $21,$11 + srl $9,$19,17 + xor $12,$10 + sll $10,2 + xor $12,$9 + srl $9,$19,19 + xor $12,$10 +#endif + xor $12,$9 # sigma1(X[i+14]) + addu $21,$12 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 29 + rotr $10,$31,6 + addu $9,$21,$3 + rotr $11,$31,11 + and $12,$31 + rotr $3,$31,25 + xor $10,$11 + rotr $11,$7,2 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $9,$12 + lw $12,116($6) # K[29] + xor $3,$11 + rotr $11,$7,22 + addu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + addu $9,$21,$3 # 29 + srl $3,$31,6 + xor $12,$1,$2 + sll $11,$31,7 + and $12,$31 + srl $10,$31,11 + xor $3,$11 + sll $11,$31,21 + xor $3,$10 + srl $10,$31,25 + xor $3,$11 + sll $11,$31,26 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + srl $3,$7,2 + addu $9,$12 + lw $12,116($6) # K[29] + sll $11,$7,10 + addu $9,$10 + srl $10,$7,13 + xor $3,$11 + sll $11,$7,19 + xor $3,$10 + srl $10,$7,22 + xor $3,$11 + sll $11,$7,30 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sw $21,52($29) # offload to ring buffer + addu $3,$10 + and $11,$7 + addu $9,$12 # +=K[29] + addu $3,$11 # +=Maj(a,b,c) + addu $30,$9 + addu $3,$9 + lw $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $12,$23,3 # Xupdate(30) + rotr $10,$23,7 + addu $22,$15 # +=X[i+9] + xor $12,$10 + rotr $10,$23,18 + + srl $13,$20,10 + rotr $11,$20,17 + xor $12,$10 # sigma0(X[i+1]) + rotr $10,$20,19 + xor $13,$11 + addu $22,$12 +#else + srl $12,$23,3 # Xupdate(30) + addu $22,$15 # +=X[i+9] + sll $11,$23,14 + srl $10,$23,7 + xor $12,$11 + sll $11,11 + xor $12,$10 + srl $10,$23,18 + xor $12,$11 + + srl $13,$20,10 + xor $12,$10 # sigma0(X[i+1]) + sll $11,$20,13 + addu $22,$12 + srl $10,$20,17 + xor $13,$11 + sll $11,2 + xor $13,$10 + srl $10,$20,19 + xor $13,$11 +#endif + xor $13,$10 # sigma1(X[i+14]) + addu $22,$13 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 30 + rotr $11,$30,6 + addu $10,$22,$2 + rotr $12,$30,11 + and $13,$30 + rotr $2,$30,25 + xor $11,$12 + rotr $12,$3,2 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $10,$13 + lw $13,120($6) # K[30] + xor $2,$12 + rotr $12,$3,22 + addu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + addu $10,$22,$2 # 30 + srl $2,$30,6 + xor $13,$31,$1 + sll $12,$30,7 + and $13,$30 + srl $11,$30,11 + xor $2,$12 + sll $12,$30,21 + xor $2,$11 + srl $11,$30,25 + xor $2,$12 + sll $12,$30,26 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + srl $2,$3,2 + addu $10,$13 + lw $13,120($6) # K[30] + sll $12,$3,10 + addu $10,$11 + srl $11,$3,13 + xor $2,$12 + sll $12,$3,19 + xor $2,$11 + srl $11,$3,22 + xor $2,$12 + sll $12,$3,30 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sw $22,56($29) # offload to ring buffer + addu $2,$11 + and $12,$3 + addu $10,$13 # +=K[30] + addu $2,$12 # +=Maj(a,b,c) + addu $25,$10 + addu $2,$10 + lw $9,4($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $13,$8,3 # Xupdate(31) + rotr $11,$8,7 + addu $23,$16 # +=X[i+9] + xor $13,$11 + rotr $11,$8,18 + + srl $14,$21,10 + rotr $12,$21,17 + xor $13,$11 # sigma0(X[i+1]) + rotr $11,$21,19 + xor $14,$12 + addu 
$23,$13 +#else + srl $13,$8,3 # Xupdate(31) + addu $23,$16 # +=X[i+9] + sll $12,$8,14 + srl $11,$8,7 + xor $13,$12 + sll $12,11 + xor $13,$11 + srl $11,$8,18 + xor $13,$12 + + srl $14,$21,10 + xor $13,$11 # sigma0(X[i+1]) + sll $12,$21,13 + addu $23,$13 + srl $11,$21,17 + xor $14,$12 + sll $12,2 + xor $14,$11 + srl $11,$21,19 + xor $14,$12 +#endif + xor $14,$11 # sigma1(X[i+14]) + addu $23,$14 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 31 + rotr $12,$25,6 + addu $11,$23,$1 + rotr $13,$25,11 + and $14,$25 + rotr $1,$25,25 + xor $12,$13 + rotr $13,$2,2 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $11,$14 + lw $14,124($6) # K[31] + xor $1,$13 + rotr $13,$2,22 + addu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#else + addu $11,$23,$1 # 31 + srl $1,$25,6 + xor $14,$30,$31 + sll $13,$25,7 + and $14,$25 + srl $12,$25,11 + xor $1,$13 + sll $13,$25,21 + xor $1,$12 + srl $12,$25,25 + xor $1,$13 + sll $13,$25,26 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + srl $1,$2,2 + addu $11,$14 + lw $14,124($6) # K[31] + sll $13,$2,10 + addu $11,$12 + srl $12,$2,13 + xor $1,$13 + sll $13,$2,19 + xor $1,$12 + srl $12,$2,22 + xor $1,$13 + sll $13,$2,30 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#endif + sw $23,60($29) # offload to ring buffer + addu $1,$12 + and $13,$2 + addu $11,$14 # +=K[31] + addu $1,$13 # +=Maj(a,b,c) + addu $24,$11 + addu $1,$11 + lw $10,8($29) # prefetch from ring buffer + and $14,0xfff + li $15,2290 + .set noreorder + bne $14,$15,.L16_xx + daddu $6,16*4 # Ktbl+=16 + + ld $23,16*4($29) # restore pointer to the end of input + lw $8,0*4($4) + lw $9,1*4($4) + lw $10,2*4($4) + daddu $5,16*4 + lw $11,3*4($4) + addu $1,$8 + lw $12,4*4($4) + addu $2,$9 + lw $13,5*4($4) + addu $3,$10 + lw $14,6*4($4) + addu $7,$11 + lw $15,7*4($4) + addu $24,$12 + sw $1,0*4($4) + addu $25,$13 + sw $2,1*4($4) + addu $30,$14 + sw $3,2*4($4) + addu $31,$15 + sw $7,3*4($4) + sw $24,4*4($4) + sw $25,5*4($4) + sw $30,6*4($4) + sw $31,7*4($4) + + bne $5,$23,.Loop + dsubu $6,192 # rewind $6 + + ld $31,192-1*8($29) + ld $30,192-2*8($29) + ld $23,192-3*8($29) + ld $22,192-4*8($29) + ld $21,192-5*8($29) + ld $20,192-6*8($29) + ld $19,192-7*8($29) + ld $18,192-8*8($29) + ld $17,192-9*8($29) + ld $16,192-10*8($29) + jr $31 + daddu $29,192 +.end sha256_block_data_order + +.rdata +.align 5 +K256: + .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 + .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 + .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 + .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 + .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc + .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da + .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 + .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 + .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 + .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 + .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 + .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 + .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 + .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 + .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 + .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 +.asciiz "SHA256 for MIPS, CRYPTOGAMS by <appro@openssl.org>" +.align 5 + diff --git a/deps/openssl/config/archs/linux64-mips64/asm/crypto/sha/sha512-mips.S b/deps/openssl/config/archs/linux64-mips64/asm/crypto/sha/sha512-mips.S new file mode 
100644 index 00000000000000..6318b5a40f9bd0 --- /dev/null +++ b/deps/openssl/config/archs/linux64-mips64/asm/crypto/sha/sha512-mips.S @@ -0,0 +1,3205 @@ +#include "mips_arch.h" + +.text +.set noat +#if !defined(__mips_eabi) && (!defined(__vxworks) || defined(__pic__)) +.option pic2 +#endif + +.align 5 +.globl sha512_block_data_order +.ent sha512_block_data_order +sha512_block_data_order: + .frame $29,256,$31 + .mask 0xc0ff0000,-8 + .set noreorder + dsubu $29,256 + sd $31,256-1*8($29) + sd $30,256-2*8($29) + sd $23,256-3*8($29) + sd $22,256-4*8($29) + sd $21,256-5*8($29) + sd $20,256-6*8($29) + sd $19,256-7*8($29) + sd $18,256-8*8($29) + sd $17,256-9*8($29) + sd $16,256-10*8($29) + dsll $23,$6,7 + .cplocal $6 + .cpsetup $25,$0,sha512_block_data_order + .set reorder + dla $6,K512 # PIC-ified 'load address' + + ld $1,0*8($4) # load context + ld $2,1*8($4) + ld $3,2*8($4) + ld $7,3*8($4) + ld $24,4*8($4) + ld $25,5*8($4) + ld $30,6*8($4) + ld $31,7*8($4) + + daddu $23,$5 # pointer to the end of input + sd $23,16*8($29) + b .Loop + +.align 5 +.Loop: +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $8,($5) +#else + ldl $8,7($5) + ldr $8,0($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $9,8($5) +#else + ldl $9,15($5) + ldr $9,8($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $8,$8 # byte swap(0) + dshd $8,$8 +#else + ori $13,$0,0xFF + dsll $15,$13,32 + or $13,$15 # 0x000000FF000000FF + and $14,$8,$13 # byte swap(0) + dsrl $15,$8,24 + dsll $14,24 + and $15,$13 + dsll $13,8 # 0x0000FF000000FF00 + or $14,$15 + and $15,$8,$13 + dsrl $8,8 + dsll $15,8 + and $8,$13 + or $14,$15 + or $8,$14 + dsrl $14,$8,32 + dsll $8,32 + or $8,$14 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 0 + drotr $13,$24,14 + daddu $12,$8,$31 + drotr $14,$24,18 + and $15,$24 + drotr $31,$24,41 + xor $13,$14 + drotr $14,$1,28 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $12,$15 + ld $15,0($6) # K[0] + xor $31,$14 + drotr $14,$1,39 + daddu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#else + daddu $12,$8,$31 # 0 + dsrl $31,$24,14 + xor $15,$25,$30 + dsll $14,$24,23 + and $15,$24 + dsrl $13,$24,18 + xor $31,$14 + dsll $14,$24,46 + xor $31,$13 + dsrl $13,$24,41 + xor $31,$14 + dsll $14,$24,50 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $12,$15 + ld $15,0($6) # K[0] + dsll $14,$1,25 + daddu $12,$13 + dsrl $13,$1,34 + xor $31,$14 + dsll $14,$1,30 + xor $31,$13 + dsrl $13,$1,39 + xor $31,$14 + dsll $14,$1,36 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#endif + sd $8,0($29) # offload to ring buffer + daddu $31,$13 + and $14,$1 + daddu $12,$15 # +=K[0] + daddu $31,$14 # +=Maj(a,b,c) + daddu $7,$12 + daddu $31,$12 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $10,16($5) +#else + ldl $10,23($5) + ldr $10,16($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $9,$9 # byte swap(1) + dshd $9,$9 +#else + ori $14,$0,0xFF + dsll $16,$14,32 + or $14,$16 # 0x000000FF000000FF + and $15,$9,$14 # byte swap(1) + dsrl $16,$9,24 + dsll $15,24 + and $16,$14 + dsll $14,8 # 0x0000FF000000FF00 + or $15,$16 + and $16,$9,$14 + dsrl $9,8 + dsll $16,8 + and $9,$14 + or $15,$16 + or $9,$15 + dsrl $15,$9,32 + dsll $9,32 + or $9,$15 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 1 + drotr $14,$7,14 + daddu $13,$9,$30 + drotr $15,$7,18 + and $16,$7 
+ drotr $30,$7,41 + xor $14,$15 + drotr $15,$31,28 + xor $16,$25 # Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $13,$16 + ld $16,8($6) # K[1] + xor $30,$15 + drotr $15,$31,39 + daddu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + daddu $13,$9,$30 # 1 + dsrl $30,$7,14 + xor $16,$24,$25 + dsll $15,$7,23 + and $16,$7 + dsrl $14,$7,18 + xor $30,$15 + dsll $15,$7,46 + xor $30,$14 + dsrl $14,$7,41 + xor $30,$15 + dsll $15,$7,50 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $13,$16 + ld $16,8($6) # K[1] + dsll $15,$31,25 + daddu $13,$14 + dsrl $14,$31,34 + xor $30,$15 + dsll $15,$31,30 + xor $30,$14 + dsrl $14,$31,39 + xor $30,$15 + dsll $15,$31,36 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sd $9,8($29) # offload to ring buffer + daddu $30,$14 + and $15,$31 + daddu $13,$16 # +=K[1] + daddu $30,$15 # +=Maj(a,b,c) + daddu $3,$13 + daddu $30,$13 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $11,24($5) +#else + ldl $11,31($5) + ldr $11,24($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $10,$10 # byte swap(2) + dshd $10,$10 +#else + ori $15,$0,0xFF + dsll $17,$15,32 + or $15,$17 # 0x000000FF000000FF + and $16,$10,$15 # byte swap(2) + dsrl $17,$10,24 + dsll $16,24 + and $17,$15 + dsll $15,8 # 0x0000FF000000FF00 + or $16,$17 + and $17,$10,$15 + dsrl $10,8 + dsll $17,8 + and $10,$15 + or $16,$17 + or $10,$16 + dsrl $16,$10,32 + dsll $10,32 + or $10,$16 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 2 + drotr $15,$3,14 + daddu $14,$10,$25 + drotr $16,$3,18 + and $17,$3 + drotr $25,$3,41 + xor $15,$16 + drotr $16,$30,28 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $14,$17 + ld $17,16($6) # K[2] + xor $25,$16 + drotr $16,$30,39 + daddu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + daddu $14,$10,$25 # 2 + dsrl $25,$3,14 + xor $17,$7,$24 + dsll $16,$3,23 + and $17,$3 + dsrl $15,$3,18 + xor $25,$16 + dsll $16,$3,46 + xor $25,$15 + dsrl $15,$3,41 + xor $25,$16 + dsll $16,$3,50 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $14,$17 + ld $17,16($6) # K[2] + dsll $16,$30,25 + daddu $14,$15 + dsrl $15,$30,34 + xor $25,$16 + dsll $16,$30,30 + xor $25,$15 + dsrl $15,$30,39 + xor $25,$16 + dsll $16,$30,36 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sd $10,16($29) # offload to ring buffer + daddu $25,$15 + and $16,$30 + daddu $14,$17 # +=K[2] + daddu $25,$16 # +=Maj(a,b,c) + daddu $2,$14 + daddu $25,$14 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $12,32($5) +#else + ldl $12,39($5) + ldr $12,32($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $11,$11 # byte swap(3) + dshd $11,$11 +#else + ori $16,$0,0xFF + dsll $18,$16,32 + or $16,$18 # 0x000000FF000000FF + and $17,$11,$16 # byte swap(3) + dsrl $18,$11,24 + dsll $17,24 + and $18,$16 + dsll $16,8 # 0x0000FF000000FF00 + or $17,$18 + and $18,$11,$16 + dsrl $11,8 + dsll $18,8 + and $11,$16 + or $17,$18 + or $11,$17 + dsrl $17,$11,32 + dsll $11,32 + or $11,$17 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 3 + drotr $16,$2,14 + daddu $15,$11,$24 + drotr $17,$2,18 + and $18,$2 + drotr $24,$2,41 + xor $16,$17 + drotr $17,$25,28 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu $15,$18 + ld $18,24($6) # K[3] 
+ xor $24,$17 + drotr $17,$25,39 + daddu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + daddu $15,$11,$24 # 3 + dsrl $24,$2,14 + xor $18,$3,$7 + dsll $17,$2,23 + and $18,$2 + dsrl $16,$2,18 + xor $24,$17 + dsll $17,$2,46 + xor $24,$16 + dsrl $16,$2,41 + xor $24,$17 + dsll $17,$2,50 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $15,$18 + ld $18,24($6) # K[3] + dsll $17,$25,25 + daddu $15,$16 + dsrl $16,$25,34 + xor $24,$17 + dsll $17,$25,30 + xor $24,$16 + dsrl $16,$25,39 + xor $24,$17 + dsll $17,$25,36 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sd $11,24($29) # offload to ring buffer + daddu $24,$16 + and $17,$25 + daddu $15,$18 # +=K[3] + daddu $24,$17 # +=Maj(a,b,c) + daddu $1,$15 + daddu $24,$15 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $13,40($5) +#else + ldl $13,47($5) + ldr $13,40($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $12,$12 # byte swap(4) + dshd $12,$12 +#else + ori $17,$0,0xFF + dsll $19,$17,32 + or $17,$19 # 0x000000FF000000FF + and $18,$12,$17 # byte swap(4) + dsrl $19,$12,24 + dsll $18,24 + and $19,$17 + dsll $17,8 # 0x0000FF000000FF00 + or $18,$19 + and $19,$12,$17 + dsrl $12,8 + dsll $19,8 + and $12,$17 + or $18,$19 + or $12,$18 + dsrl $18,$12,32 + dsll $12,32 + or $12,$18 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 4 + drotr $17,$1,14 + daddu $16,$12,$7 + drotr $18,$1,18 + and $19,$1 + drotr $7,$1,41 + xor $17,$18 + drotr $18,$24,28 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $16,$19 + ld $19,32($6) # K[4] + xor $7,$18 + drotr $18,$24,39 + daddu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + daddu $16,$12,$7 # 4 + dsrl $7,$1,14 + xor $19,$2,$3 + dsll $18,$1,23 + and $19,$1 + dsrl $17,$1,18 + xor $7,$18 + dsll $18,$1,46 + xor $7,$17 + dsrl $17,$1,41 + xor $7,$18 + dsll $18,$1,50 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $16,$19 + ld $19,32($6) # K[4] + dsll $18,$24,25 + daddu $16,$17 + dsrl $17,$24,34 + xor $7,$18 + dsll $18,$24,30 + xor $7,$17 + dsrl $17,$24,39 + xor $7,$18 + dsll $18,$24,36 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sd $12,32($29) # offload to ring buffer + daddu $7,$17 + and $18,$24 + daddu $16,$19 # +=K[4] + daddu $7,$18 # +=Maj(a,b,c) + daddu $31,$16 + daddu $7,$16 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $14,48($5) +#else + ldl $14,55($5) + ldr $14,48($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $13,$13 # byte swap(5) + dshd $13,$13 +#else + ori $18,$0,0xFF + dsll $20,$18,32 + or $18,$20 # 0x000000FF000000FF + and $19,$13,$18 # byte swap(5) + dsrl $20,$13,24 + dsll $19,24 + and $20,$18 + dsll $18,8 # 0x0000FF000000FF00 + or $19,$20 + and $20,$13,$18 + dsrl $13,8 + dsll $20,8 + and $13,$18 + or $19,$20 + or $13,$19 + dsrl $19,$13,32 + dsll $13,32 + or $13,$19 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 5 + drotr $18,$31,14 + daddu $17,$13,$3 + drotr $19,$31,18 + and $20,$31 + drotr $3,$31,41 + xor $18,$19 + drotr $19,$7,28 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $17,$20 + ld $20,40($6) # K[5] + xor $3,$19 + drotr $19,$7,39 + daddu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + daddu $17,$13,$3 # 5 + dsrl $3,$31,14 + xor $20,$1,$2 + 
dsll $19,$31,23 + and $20,$31 + dsrl $18,$31,18 + xor $3,$19 + dsll $19,$31,46 + xor $3,$18 + dsrl $18,$31,41 + xor $3,$19 + dsll $19,$31,50 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $17,$20 + ld $20,40($6) # K[5] + dsll $19,$7,25 + daddu $17,$18 + dsrl $18,$7,34 + xor $3,$19 + dsll $19,$7,30 + xor $3,$18 + dsrl $18,$7,39 + xor $3,$19 + dsll $19,$7,36 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sd $13,40($29) # offload to ring buffer + daddu $3,$18 + and $19,$7 + daddu $17,$20 # +=K[5] + daddu $3,$19 # +=Maj(a,b,c) + daddu $30,$17 + daddu $3,$17 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $15,56($5) +#else + ldl $15,63($5) + ldr $15,56($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $14,$14 # byte swap(6) + dshd $14,$14 +#else + ori $19,$0,0xFF + dsll $21,$19,32 + or $19,$21 # 0x000000FF000000FF + and $20,$14,$19 # byte swap(6) + dsrl $21,$14,24 + dsll $20,24 + and $21,$19 + dsll $19,8 # 0x0000FF000000FF00 + or $20,$21 + and $21,$14,$19 + dsrl $14,8 + dsll $21,8 + and $14,$19 + or $20,$21 + or $14,$20 + dsrl $20,$14,32 + dsll $14,32 + or $14,$20 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 6 + drotr $19,$30,14 + daddu $18,$14,$2 + drotr $20,$30,18 + and $21,$30 + drotr $2,$30,41 + xor $19,$20 + drotr $20,$3,28 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $18,$21 + ld $21,48($6) # K[6] + xor $2,$20 + drotr $20,$3,39 + daddu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + daddu $18,$14,$2 # 6 + dsrl $2,$30,14 + xor $21,$31,$1 + dsll $20,$30,23 + and $21,$30 + dsrl $19,$30,18 + xor $2,$20 + dsll $20,$30,46 + xor $2,$19 + dsrl $19,$30,41 + xor $2,$20 + dsll $20,$30,50 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $18,$21 + ld $21,48($6) # K[6] + dsll $20,$3,25 + daddu $18,$19 + dsrl $19,$3,34 + xor $2,$20 + dsll $20,$3,30 + xor $2,$19 + dsrl $19,$3,39 + xor $2,$20 + dsll $20,$3,36 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sd $14,48($29) # offload to ring buffer + daddu $2,$19 + and $20,$3 + daddu $18,$21 # +=K[6] + daddu $2,$20 # +=Maj(a,b,c) + daddu $25,$18 + daddu $2,$18 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $16,64($5) +#else + ldl $16,71($5) + ldr $16,64($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $15,$15 # byte swap(7) + dshd $15,$15 +#else + ori $20,$0,0xFF + dsll $22,$20,32 + or $20,$22 # 0x000000FF000000FF + and $21,$15,$20 # byte swap(7) + dsrl $22,$15,24 + dsll $21,24 + and $22,$20 + dsll $20,8 # 0x0000FF000000FF00 + or $21,$22 + and $22,$15,$20 + dsrl $15,8 + dsll $22,8 + and $15,$20 + or $21,$22 + or $15,$21 + dsrl $21,$15,32 + dsll $15,32 + or $15,$21 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 7 + drotr $20,$25,14 + daddu $19,$15,$1 + drotr $21,$25,18 + and $22,$25 + drotr $1,$25,41 + xor $20,$21 + drotr $21,$2,28 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $19,$22 + ld $22,56($6) # K[7] + xor $1,$21 + drotr $21,$2,39 + daddu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + daddu $19,$15,$1 # 7 + dsrl $1,$25,14 + xor $22,$30,$31 + dsll $21,$25,23 + and $22,$25 + dsrl $20,$25,18 + xor $1,$21 + dsll $21,$25,46 + xor $1,$20 + dsrl $20,$25,41 + xor $1,$21 + dsll $21,$25,50 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor 
$20,$21,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $19,$22 + ld $22,56($6) # K[7] + dsll $21,$2,25 + daddu $19,$20 + dsrl $20,$2,34 + xor $1,$21 + dsll $21,$2,30 + xor $1,$20 + dsrl $20,$2,39 + xor $1,$21 + dsll $21,$2,36 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sd $15,56($29) # offload to ring buffer + daddu $1,$20 + and $21,$2 + daddu $19,$22 # +=K[7] + daddu $1,$21 # +=Maj(a,b,c) + daddu $24,$19 + daddu $1,$19 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $17,72($5) +#else + ldl $17,79($5) + ldr $17,72($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $16,$16 # byte swap(8) + dshd $16,$16 +#else + ori $21,$0,0xFF + dsll $23,$21,32 + or $21,$23 # 0x000000FF000000FF + and $22,$16,$21 # byte swap(8) + dsrl $23,$16,24 + dsll $22,24 + and $23,$21 + dsll $21,8 # 0x0000FF000000FF00 + or $22,$23 + and $23,$16,$21 + dsrl $16,8 + dsll $23,8 + and $16,$21 + or $22,$23 + or $16,$22 + dsrl $22,$16,32 + dsll $16,32 + or $16,$22 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 8 + drotr $21,$24,14 + daddu $20,$16,$31 + drotr $22,$24,18 + and $23,$24 + drotr $31,$24,41 + xor $21,$22 + drotr $22,$1,28 + xor $23,$30 # Ch(e,f,g) + xor $21,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $20,$23 + ld $23,64($6) # K[8] + xor $31,$22 + drotr $22,$1,39 + daddu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + daddu $20,$16,$31 # 8 + dsrl $31,$24,14 + xor $23,$25,$30 + dsll $22,$24,23 + and $23,$24 + dsrl $21,$24,18 + xor $31,$22 + dsll $22,$24,46 + xor $31,$21 + dsrl $21,$24,41 + xor $31,$22 + dsll $22,$24,50 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $20,$23 + ld $23,64($6) # K[8] + dsll $22,$1,25 + daddu $20,$21 + dsrl $21,$1,34 + xor $31,$22 + dsll $22,$1,30 + xor $31,$21 + dsrl $21,$1,39 + xor $31,$22 + dsll $22,$1,36 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sd $16,64($29) # offload to ring buffer + daddu $31,$21 + and $22,$1 + daddu $20,$23 # +=K[8] + daddu $31,$22 # +=Maj(a,b,c) + daddu $7,$20 + daddu $31,$20 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $18,80($5) +#else + ldl $18,87($5) + ldr $18,80($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $17,$17 # byte swap(9) + dshd $17,$17 +#else + ori $22,$0,0xFF + dsll $8,$22,32 + or $22,$8 # 0x000000FF000000FF + and $23,$17,$22 # byte swap(9) + dsrl $8,$17,24 + dsll $23,24 + and $8,$22 + dsll $22,8 # 0x0000FF000000FF00 + or $23,$8 + and $8,$17,$22 + dsrl $17,8 + dsll $8,8 + and $17,$22 + or $23,$8 + or $17,$23 + dsrl $23,$17,32 + dsll $17,32 + or $17,$23 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 9 + drotr $22,$7,14 + daddu $21,$17,$30 + drotr $23,$7,18 + and $8,$7 + drotr $30,$7,41 + xor $22,$23 + drotr $23,$31,28 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $21,$8 + ld $8,72($6) # K[9] + xor $30,$23 + drotr $23,$31,39 + daddu $21,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + daddu $21,$17,$30 # 9 + dsrl $30,$7,14 + xor $8,$24,$25 + dsll $23,$7,23 + and $8,$7 + dsrl $22,$7,18 + xor $30,$23 + dsll $23,$7,46 + xor $30,$22 + dsrl $22,$7,41 + xor $30,$23 + dsll $23,$7,50 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $21,$8 + ld $8,72($6) # K[9] + dsll $23,$31,25 + daddu $21,$22 + dsrl $22,$31,34 + xor $30,$23 + dsll $23,$31,30 + xor $30,$22 + dsrl 
$22,$31,39 + xor $30,$23 + dsll $23,$31,36 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sd $17,72($29) # offload to ring buffer + daddu $30,$22 + and $23,$31 + daddu $21,$8 # +=K[9] + daddu $30,$23 # +=Maj(a,b,c) + daddu $3,$21 + daddu $30,$21 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $19,88($5) +#else + ldl $19,95($5) + ldr $19,88($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $18,$18 # byte swap(10) + dshd $18,$18 +#else + ori $23,$0,0xFF + dsll $9,$23,32 + or $23,$9 # 0x000000FF000000FF + and $8,$18,$23 # byte swap(10) + dsrl $9,$18,24 + dsll $8,24 + and $9,$23 + dsll $23,8 # 0x0000FF000000FF00 + or $8,$9 + and $9,$18,$23 + dsrl $18,8 + dsll $9,8 + and $18,$23 + or $8,$9 + or $18,$8 + dsrl $8,$18,32 + dsll $18,32 + or $18,$8 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 10 + drotr $23,$3,14 + daddu $22,$18,$25 + drotr $8,$3,18 + and $9,$3 + drotr $25,$3,41 + xor $23,$8 + drotr $8,$30,28 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $22,$9 + ld $9,80($6) # K[10] + xor $25,$8 + drotr $8,$30,39 + daddu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + daddu $22,$18,$25 # 10 + dsrl $25,$3,14 + xor $9,$7,$24 + dsll $8,$3,23 + and $9,$3 + dsrl $23,$3,18 + xor $25,$8 + dsll $8,$3,46 + xor $25,$23 + dsrl $23,$3,41 + xor $25,$8 + dsll $8,$3,50 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $22,$9 + ld $9,80($6) # K[10] + dsll $8,$30,25 + daddu $22,$23 + dsrl $23,$30,34 + xor $25,$8 + dsll $8,$30,30 + xor $25,$23 + dsrl $23,$30,39 + xor $25,$8 + dsll $8,$30,36 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sd $18,80($29) # offload to ring buffer + daddu $25,$23 + and $8,$30 + daddu $22,$9 # +=K[10] + daddu $25,$8 # +=Maj(a,b,c) + daddu $2,$22 + daddu $25,$22 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $20,96($5) +#else + ldl $20,103($5) + ldr $20,96($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $19,$19 # byte swap(11) + dshd $19,$19 +#else + ori $8,$0,0xFF + dsll $10,$8,32 + or $8,$10 # 0x000000FF000000FF + and $9,$19,$8 # byte swap(11) + dsrl $10,$19,24 + dsll $9,24 + and $10,$8 + dsll $8,8 # 0x0000FF000000FF00 + or $9,$10 + and $10,$19,$8 + dsrl $19,8 + dsll $10,8 + and $19,$8 + or $9,$10 + or $19,$9 + dsrl $9,$19,32 + dsll $19,32 + or $19,$9 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 11 + drotr $8,$2,14 + daddu $23,$19,$24 + drotr $9,$2,18 + and $10,$2 + drotr $24,$2,41 + xor $8,$9 + drotr $9,$25,28 + xor $10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu $23,$10 + ld $10,88($6) # K[11] + xor $24,$9 + drotr $9,$25,39 + daddu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + daddu $23,$19,$24 # 11 + dsrl $24,$2,14 + xor $10,$3,$7 + dsll $9,$2,23 + and $10,$2 + dsrl $8,$2,18 + xor $24,$9 + dsll $9,$2,46 + xor $24,$8 + dsrl $8,$2,41 + xor $24,$9 + dsll $9,$2,50 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $23,$10 + ld $10,88($6) # K[11] + dsll $9,$25,25 + daddu $23,$8 + dsrl $8,$25,34 + xor $24,$9 + dsll $9,$25,30 + xor $24,$8 + dsrl $8,$25,39 + xor $24,$9 + dsll $9,$25,36 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sd $19,88($29) # offload to ring buffer + daddu $24,$8 + and $9,$25 + daddu $23,$10 # +=K[11] + daddu $24,$9 # 
+=Maj(a,b,c) + daddu $1,$23 + daddu $24,$23 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $21,104($5) +#else + ldl $21,111($5) + ldr $21,104($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $20,$20 # byte swap(12) + dshd $20,$20 +#else + ori $9,$0,0xFF + dsll $11,$9,32 + or $9,$11 # 0x000000FF000000FF + and $10,$20,$9 # byte swap(12) + dsrl $11,$20,24 + dsll $10,24 + and $11,$9 + dsll $9,8 # 0x0000FF000000FF00 + or $10,$11 + and $11,$20,$9 + dsrl $20,8 + dsll $11,8 + and $20,$9 + or $10,$11 + or $20,$10 + dsrl $10,$20,32 + dsll $20,32 + or $20,$10 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 12 + drotr $9,$1,14 + daddu $8,$20,$7 + drotr $10,$1,18 + and $11,$1 + drotr $7,$1,41 + xor $9,$10 + drotr $10,$24,28 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $8,$11 + ld $11,96($6) # K[12] + xor $7,$10 + drotr $10,$24,39 + daddu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + daddu $8,$20,$7 # 12 + dsrl $7,$1,14 + xor $11,$2,$3 + dsll $10,$1,23 + and $11,$1 + dsrl $9,$1,18 + xor $7,$10 + dsll $10,$1,46 + xor $7,$9 + dsrl $9,$1,41 + xor $7,$10 + dsll $10,$1,50 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $8,$11 + ld $11,96($6) # K[12] + dsll $10,$24,25 + daddu $8,$9 + dsrl $9,$24,34 + xor $7,$10 + dsll $10,$24,30 + xor $7,$9 + dsrl $9,$24,39 + xor $7,$10 + dsll $10,$24,36 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sd $20,96($29) # offload to ring buffer + daddu $7,$9 + and $10,$24 + daddu $8,$11 # +=K[12] + daddu $7,$10 # +=Maj(a,b,c) + daddu $31,$8 + daddu $7,$8 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $22,112($5) +#else + ldl $22,119($5) + ldr $22,112($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $21,$21 # byte swap(13) + dshd $21,$21 +#else + ori $10,$0,0xFF + dsll $12,$10,32 + or $10,$12 # 0x000000FF000000FF + and $11,$21,$10 # byte swap(13) + dsrl $12,$21,24 + dsll $11,24 + and $12,$10 + dsll $10,8 # 0x0000FF000000FF00 + or $11,$12 + and $12,$21,$10 + dsrl $21,8 + dsll $12,8 + and $21,$10 + or $11,$12 + or $21,$11 + dsrl $11,$21,32 + dsll $21,32 + or $21,$11 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 13 + drotr $10,$31,14 + daddu $9,$21,$3 + drotr $11,$31,18 + and $12,$31 + drotr $3,$31,41 + xor $10,$11 + drotr $11,$7,28 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $9,$12 + ld $12,104($6) # K[13] + xor $3,$11 + drotr $11,$7,39 + daddu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + daddu $9,$21,$3 # 13 + dsrl $3,$31,14 + xor $12,$1,$2 + dsll $11,$31,23 + and $12,$31 + dsrl $10,$31,18 + xor $3,$11 + dsll $11,$31,46 + xor $3,$10 + dsrl $10,$31,41 + xor $3,$11 + dsll $11,$31,50 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $9,$12 + ld $12,104($6) # K[13] + dsll $11,$7,25 + daddu $9,$10 + dsrl $10,$7,34 + xor $3,$11 + dsll $11,$7,30 + xor $3,$10 + dsrl $10,$7,39 + xor $3,$11 + dsll $11,$7,36 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sd $21,104($29) # offload to ring buffer + daddu $3,$10 + and $11,$7 + daddu $9,$12 # +=K[13] + daddu $3,$11 # +=Maj(a,b,c) + daddu $30,$9 + daddu $3,$9 + ld $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $23,120($5) +#else + ldl $23,127($5) + ldr $23,120($5) 
+#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $22,$22 # byte swap(14) + dshd $22,$22 +#else + ori $11,$0,0xFF + dsll $13,$11,32 + or $11,$13 # 0x000000FF000000FF + and $12,$22,$11 # byte swap(14) + dsrl $13,$22,24 + dsll $12,24 + and $13,$11 + dsll $11,8 # 0x0000FF000000FF00 + or $12,$13 + and $13,$22,$11 + dsrl $22,8 + dsll $13,8 + and $22,$11 + or $12,$13 + or $22,$12 + dsrl $12,$22,32 + dsll $22,32 + or $22,$12 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 14 + drotr $11,$30,14 + daddu $10,$22,$2 + drotr $12,$30,18 + and $13,$30 + drotr $2,$30,41 + xor $11,$12 + drotr $12,$3,28 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $10,$13 + ld $13,112($6) # K[14] + xor $2,$12 + drotr $12,$3,39 + daddu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + daddu $10,$22,$2 # 14 + dsrl $2,$30,14 + xor $13,$31,$1 + dsll $12,$30,23 + and $13,$30 + dsrl $11,$30,18 + xor $2,$12 + dsll $12,$30,46 + xor $2,$11 + dsrl $11,$30,41 + xor $2,$12 + dsll $12,$30,50 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $10,$13 + ld $13,112($6) # K[14] + dsll $12,$3,25 + daddu $10,$11 + dsrl $11,$3,34 + xor $2,$12 + dsll $12,$3,30 + xor $2,$11 + dsrl $11,$3,39 + xor $2,$12 + dsll $12,$3,36 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sd $22,112($29) # offload to ring buffer + daddu $2,$11 + and $12,$3 + daddu $10,$13 # +=K[14] + daddu $2,$12 # +=Maj(a,b,c) + daddu $25,$10 + daddu $2,$10 + ld $9,8($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $23,$23 # byte swap(15) + dshd $23,$23 +#else + ori $12,$0,0xFF + dsll $14,$12,32 + or $12,$14 # 0x000000FF000000FF + and $13,$23,$12 # byte swap(15) + dsrl $14,$23,24 + dsll $13,24 + and $14,$12 + dsll $12,8 # 0x0000FF000000FF00 + or $13,$14 + and $14,$23,$12 + dsrl $23,8 + dsll $14,8 + and $23,$12 + or $13,$14 + or $23,$13 + dsrl $13,$23,32 + dsll $23,32 + or $23,$13 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 15 + drotr $12,$25,14 + daddu $11,$23,$1 + drotr $13,$25,18 + and $14,$25 + drotr $1,$25,41 + xor $12,$13 + drotr $13,$2,28 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $11,$14 + ld $14,120($6) # K[15] + xor $1,$13 + drotr $13,$2,39 + daddu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#else + daddu $11,$23,$1 # 15 + dsrl $1,$25,14 + xor $14,$30,$31 + dsll $13,$25,23 + and $14,$25 + dsrl $12,$25,18 + xor $1,$13 + dsll $13,$25,46 + xor $1,$12 + dsrl $12,$25,41 + xor $1,$13 + dsll $13,$25,50 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $11,$14 + ld $14,120($6) # K[15] + dsll $13,$2,25 + daddu $11,$12 + dsrl $12,$2,34 + xor $1,$13 + dsll $13,$2,30 + xor $1,$12 + dsrl $12,$2,39 + xor $1,$13 + dsll $13,$2,36 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#endif + sd $23,120($29) # offload to ring buffer + daddu $1,$12 + and $13,$2 + daddu $11,$14 # +=K[15] + daddu $1,$13 # +=Maj(a,b,c) + daddu $24,$11 + daddu $1,$11 + ld $10,16($29) # prefetch from ring buffer + b .L16_xx +.align 4 +.L16_xx: +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $14,$9,7 # Xupdate(16) + drotr $12,$9,1 + daddu $8,$17 # +=X[i+9] + xor $14,$12 + drotr $12,$9,8 + + dsrl $15,$22,6 + drotr $13,$22,19 + xor $14,$12 # sigma0(X[i+1]) + drotr $12,$22,61 + xor $15,$13 + daddu $8,$14 +#else + dsrl $14,$9,7 
# Xupdate(16) + daddu $8,$17 # +=X[i+9] + dsll $13,$9,56 + dsrl $12,$9,1 + xor $14,$13 + dsll $13,7 + xor $14,$12 + dsrl $12,$9,8 + xor $14,$13 + + dsrl $15,$22,6 + xor $14,$12 # sigma0(X[i+1]) + dsll $13,$22,3 + daddu $8,$14 + dsrl $12,$22,19 + xor $15,$13 + dsll $13,42 + xor $15,$12 + dsrl $12,$22,61 + xor $15,$13 +#endif + xor $15,$12 # sigma1(X[i+14]) + daddu $8,$15 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 16 + drotr $13,$24,14 + daddu $12,$8,$31 + drotr $14,$24,18 + and $15,$24 + drotr $31,$24,41 + xor $13,$14 + drotr $14,$1,28 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $12,$15 + ld $15,128($6) # K[16] + xor $31,$14 + drotr $14,$1,39 + daddu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#else + daddu $12,$8,$31 # 16 + dsrl $31,$24,14 + xor $15,$25,$30 + dsll $14,$24,23 + and $15,$24 + dsrl $13,$24,18 + xor $31,$14 + dsll $14,$24,46 + xor $31,$13 + dsrl $13,$24,41 + xor $31,$14 + dsll $14,$24,50 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $12,$15 + ld $15,128($6) # K[16] + dsll $14,$1,25 + daddu $12,$13 + dsrl $13,$1,34 + xor $31,$14 + dsll $14,$1,30 + xor $31,$13 + dsrl $13,$1,39 + xor $31,$14 + dsll $14,$1,36 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#endif + sd $8,0($29) # offload to ring buffer + daddu $31,$13 + and $14,$1 + daddu $12,$15 # +=K[16] + daddu $31,$14 # +=Maj(a,b,c) + daddu $7,$12 + daddu $31,$12 + ld $11,24($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $15,$10,7 # Xupdate(17) + drotr $13,$10,1 + daddu $9,$18 # +=X[i+9] + xor $15,$13 + drotr $13,$10,8 + + dsrl $16,$23,6 + drotr $14,$23,19 + xor $15,$13 # sigma0(X[i+1]) + drotr $13,$23,61 + xor $16,$14 + daddu $9,$15 +#else + dsrl $15,$10,7 # Xupdate(17) + daddu $9,$18 # +=X[i+9] + dsll $14,$10,56 + dsrl $13,$10,1 + xor $15,$14 + dsll $14,7 + xor $15,$13 + dsrl $13,$10,8 + xor $15,$14 + + dsrl $16,$23,6 + xor $15,$13 # sigma0(X[i+1]) + dsll $14,$23,3 + daddu $9,$15 + dsrl $13,$23,19 + xor $16,$14 + dsll $14,42 + xor $16,$13 + dsrl $13,$23,61 + xor $16,$14 +#endif + xor $16,$13 # sigma1(X[i+14]) + daddu $9,$16 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 17 + drotr $14,$7,14 + daddu $13,$9,$30 + drotr $15,$7,18 + and $16,$7 + drotr $30,$7,41 + xor $14,$15 + drotr $15,$31,28 + xor $16,$25 # Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $13,$16 + ld $16,136($6) # K[17] + xor $30,$15 + drotr $15,$31,39 + daddu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + daddu $13,$9,$30 # 17 + dsrl $30,$7,14 + xor $16,$24,$25 + dsll $15,$7,23 + and $16,$7 + dsrl $14,$7,18 + xor $30,$15 + dsll $15,$7,46 + xor $30,$14 + dsrl $14,$7,41 + xor $30,$15 + dsll $15,$7,50 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $13,$16 + ld $16,136($6) # K[17] + dsll $15,$31,25 + daddu $13,$14 + dsrl $14,$31,34 + xor $30,$15 + dsll $15,$31,30 + xor $30,$14 + dsrl $14,$31,39 + xor $30,$15 + dsll $15,$31,36 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sd $9,8($29) # offload to ring buffer + daddu $30,$14 + and $15,$31 + daddu $13,$16 # +=K[17] + daddu $30,$15 # +=Maj(a,b,c) + daddu $3,$13 + daddu $30,$13 + ld $12,32($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $16,$11,7 # Xupdate(18) 
+ drotr $14,$11,1 + daddu $10,$19 # +=X[i+9] + xor $16,$14 + drotr $14,$11,8 + + dsrl $17,$8,6 + drotr $15,$8,19 + xor $16,$14 # sigma0(X[i+1]) + drotr $14,$8,61 + xor $17,$15 + daddu $10,$16 +#else + dsrl $16,$11,7 # Xupdate(18) + daddu $10,$19 # +=X[i+9] + dsll $15,$11,56 + dsrl $14,$11,1 + xor $16,$15 + dsll $15,7 + xor $16,$14 + dsrl $14,$11,8 + xor $16,$15 + + dsrl $17,$8,6 + xor $16,$14 # sigma0(X[i+1]) + dsll $15,$8,3 + daddu $10,$16 + dsrl $14,$8,19 + xor $17,$15 + dsll $15,42 + xor $17,$14 + dsrl $14,$8,61 + xor $17,$15 +#endif + xor $17,$14 # sigma1(X[i+14]) + daddu $10,$17 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 18 + drotr $15,$3,14 + daddu $14,$10,$25 + drotr $16,$3,18 + and $17,$3 + drotr $25,$3,41 + xor $15,$16 + drotr $16,$30,28 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $14,$17 + ld $17,144($6) # K[18] + xor $25,$16 + drotr $16,$30,39 + daddu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + daddu $14,$10,$25 # 18 + dsrl $25,$3,14 + xor $17,$7,$24 + dsll $16,$3,23 + and $17,$3 + dsrl $15,$3,18 + xor $25,$16 + dsll $16,$3,46 + xor $25,$15 + dsrl $15,$3,41 + xor $25,$16 + dsll $16,$3,50 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $14,$17 + ld $17,144($6) # K[18] + dsll $16,$30,25 + daddu $14,$15 + dsrl $15,$30,34 + xor $25,$16 + dsll $16,$30,30 + xor $25,$15 + dsrl $15,$30,39 + xor $25,$16 + dsll $16,$30,36 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sd $10,16($29) # offload to ring buffer + daddu $25,$15 + and $16,$30 + daddu $14,$17 # +=K[18] + daddu $25,$16 # +=Maj(a,b,c) + daddu $2,$14 + daddu $25,$14 + ld $13,40($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $17,$12,7 # Xupdate(19) + drotr $15,$12,1 + daddu $11,$20 # +=X[i+9] + xor $17,$15 + drotr $15,$12,8 + + dsrl $18,$9,6 + drotr $16,$9,19 + xor $17,$15 # sigma0(X[i+1]) + drotr $15,$9,61 + xor $18,$16 + daddu $11,$17 +#else + dsrl $17,$12,7 # Xupdate(19) + daddu $11,$20 # +=X[i+9] + dsll $16,$12,56 + dsrl $15,$12,1 + xor $17,$16 + dsll $16,7 + xor $17,$15 + dsrl $15,$12,8 + xor $17,$16 + + dsrl $18,$9,6 + xor $17,$15 # sigma0(X[i+1]) + dsll $16,$9,3 + daddu $11,$17 + dsrl $15,$9,19 + xor $18,$16 + dsll $16,42 + xor $18,$15 + dsrl $15,$9,61 + xor $18,$16 +#endif + xor $18,$15 # sigma1(X[i+14]) + daddu $11,$18 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 19 + drotr $16,$2,14 + daddu $15,$11,$24 + drotr $17,$2,18 + and $18,$2 + drotr $24,$2,41 + xor $16,$17 + drotr $17,$25,28 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu $15,$18 + ld $18,152($6) # K[19] + xor $24,$17 + drotr $17,$25,39 + daddu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + daddu $15,$11,$24 # 19 + dsrl $24,$2,14 + xor $18,$3,$7 + dsll $17,$2,23 + and $18,$2 + dsrl $16,$2,18 + xor $24,$17 + dsll $17,$2,46 + xor $24,$16 + dsrl $16,$2,41 + xor $24,$17 + dsll $17,$2,50 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $15,$18 + ld $18,152($6) # K[19] + dsll $17,$25,25 + daddu $15,$16 + dsrl $16,$25,34 + xor $24,$17 + dsll $17,$25,30 + xor $24,$16 + dsrl $16,$25,39 + xor $24,$17 + dsll $17,$25,36 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sd $11,24($29) # offload to ring buffer + daddu $24,$16 + and $17,$25 + 
daddu $15,$18 # +=K[19] + daddu $24,$17 # +=Maj(a,b,c) + daddu $1,$15 + daddu $24,$15 + ld $14,48($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $18,$13,7 # Xupdate(20) + drotr $16,$13,1 + daddu $12,$21 # +=X[i+9] + xor $18,$16 + drotr $16,$13,8 + + dsrl $19,$10,6 + drotr $17,$10,19 + xor $18,$16 # sigma0(X[i+1]) + drotr $16,$10,61 + xor $19,$17 + daddu $12,$18 +#else + dsrl $18,$13,7 # Xupdate(20) + daddu $12,$21 # +=X[i+9] + dsll $17,$13,56 + dsrl $16,$13,1 + xor $18,$17 + dsll $17,7 + xor $18,$16 + dsrl $16,$13,8 + xor $18,$17 + + dsrl $19,$10,6 + xor $18,$16 # sigma0(X[i+1]) + dsll $17,$10,3 + daddu $12,$18 + dsrl $16,$10,19 + xor $19,$17 + dsll $17,42 + xor $19,$16 + dsrl $16,$10,61 + xor $19,$17 +#endif + xor $19,$16 # sigma1(X[i+14]) + daddu $12,$19 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 20 + drotr $17,$1,14 + daddu $16,$12,$7 + drotr $18,$1,18 + and $19,$1 + drotr $7,$1,41 + xor $17,$18 + drotr $18,$24,28 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $16,$19 + ld $19,160($6) # K[20] + xor $7,$18 + drotr $18,$24,39 + daddu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + daddu $16,$12,$7 # 20 + dsrl $7,$1,14 + xor $19,$2,$3 + dsll $18,$1,23 + and $19,$1 + dsrl $17,$1,18 + xor $7,$18 + dsll $18,$1,46 + xor $7,$17 + dsrl $17,$1,41 + xor $7,$18 + dsll $18,$1,50 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $16,$19 + ld $19,160($6) # K[20] + dsll $18,$24,25 + daddu $16,$17 + dsrl $17,$24,34 + xor $7,$18 + dsll $18,$24,30 + xor $7,$17 + dsrl $17,$24,39 + xor $7,$18 + dsll $18,$24,36 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sd $12,32($29) # offload to ring buffer + daddu $7,$17 + and $18,$24 + daddu $16,$19 # +=K[20] + daddu $7,$18 # +=Maj(a,b,c) + daddu $31,$16 + daddu $7,$16 + ld $15,56($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $19,$14,7 # Xupdate(21) + drotr $17,$14,1 + daddu $13,$22 # +=X[i+9] + xor $19,$17 + drotr $17,$14,8 + + dsrl $20,$11,6 + drotr $18,$11,19 + xor $19,$17 # sigma0(X[i+1]) + drotr $17,$11,61 + xor $20,$18 + daddu $13,$19 +#else + dsrl $19,$14,7 # Xupdate(21) + daddu $13,$22 # +=X[i+9] + dsll $18,$14,56 + dsrl $17,$14,1 + xor $19,$18 + dsll $18,7 + xor $19,$17 + dsrl $17,$14,8 + xor $19,$18 + + dsrl $20,$11,6 + xor $19,$17 # sigma0(X[i+1]) + dsll $18,$11,3 + daddu $13,$19 + dsrl $17,$11,19 + xor $20,$18 + dsll $18,42 + xor $20,$17 + dsrl $17,$11,61 + xor $20,$18 +#endif + xor $20,$17 # sigma1(X[i+14]) + daddu $13,$20 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 21 + drotr $18,$31,14 + daddu $17,$13,$3 + drotr $19,$31,18 + and $20,$31 + drotr $3,$31,41 + xor $18,$19 + drotr $19,$7,28 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $17,$20 + ld $20,168($6) # K[21] + xor $3,$19 + drotr $19,$7,39 + daddu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + daddu $17,$13,$3 # 21 + dsrl $3,$31,14 + xor $20,$1,$2 + dsll $19,$31,23 + and $20,$31 + dsrl $18,$31,18 + xor $3,$19 + dsll $19,$31,46 + xor $3,$18 + dsrl $18,$31,41 + xor $3,$19 + dsll $19,$31,50 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $17,$20 + ld $20,168($6) # K[21] + dsll $19,$7,25 + daddu $17,$18 + dsrl $18,$7,34 + xor $3,$19 + dsll $19,$7,30 + xor $3,$18 
+ dsrl $18,$7,39 + xor $3,$19 + dsll $19,$7,36 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sd $13,40($29) # offload to ring buffer + daddu $3,$18 + and $19,$7 + daddu $17,$20 # +=K[21] + daddu $3,$19 # +=Maj(a,b,c) + daddu $30,$17 + daddu $3,$17 + ld $16,64($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $20,$15,7 # Xupdate(22) + drotr $18,$15,1 + daddu $14,$23 # +=X[i+9] + xor $20,$18 + drotr $18,$15,8 + + dsrl $21,$12,6 + drotr $19,$12,19 + xor $20,$18 # sigma0(X[i+1]) + drotr $18,$12,61 + xor $21,$19 + daddu $14,$20 +#else + dsrl $20,$15,7 # Xupdate(22) + daddu $14,$23 # +=X[i+9] + dsll $19,$15,56 + dsrl $18,$15,1 + xor $20,$19 + dsll $19,7 + xor $20,$18 + dsrl $18,$15,8 + xor $20,$19 + + dsrl $21,$12,6 + xor $20,$18 # sigma0(X[i+1]) + dsll $19,$12,3 + daddu $14,$20 + dsrl $18,$12,19 + xor $21,$19 + dsll $19,42 + xor $21,$18 + dsrl $18,$12,61 + xor $21,$19 +#endif + xor $21,$18 # sigma1(X[i+14]) + daddu $14,$21 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 22 + drotr $19,$30,14 + daddu $18,$14,$2 + drotr $20,$30,18 + and $21,$30 + drotr $2,$30,41 + xor $19,$20 + drotr $20,$3,28 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $18,$21 + ld $21,176($6) # K[22] + xor $2,$20 + drotr $20,$3,39 + daddu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + daddu $18,$14,$2 # 22 + dsrl $2,$30,14 + xor $21,$31,$1 + dsll $20,$30,23 + and $21,$30 + dsrl $19,$30,18 + xor $2,$20 + dsll $20,$30,46 + xor $2,$19 + dsrl $19,$30,41 + xor $2,$20 + dsll $20,$30,50 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $18,$21 + ld $21,176($6) # K[22] + dsll $20,$3,25 + daddu $18,$19 + dsrl $19,$3,34 + xor $2,$20 + dsll $20,$3,30 + xor $2,$19 + dsrl $19,$3,39 + xor $2,$20 + dsll $20,$3,36 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sd $14,48($29) # offload to ring buffer + daddu $2,$19 + and $20,$3 + daddu $18,$21 # +=K[22] + daddu $2,$20 # +=Maj(a,b,c) + daddu $25,$18 + daddu $2,$18 + ld $17,72($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $21,$16,7 # Xupdate(23) + drotr $19,$16,1 + daddu $15,$8 # +=X[i+9] + xor $21,$19 + drotr $19,$16,8 + + dsrl $22,$13,6 + drotr $20,$13,19 + xor $21,$19 # sigma0(X[i+1]) + drotr $19,$13,61 + xor $22,$20 + daddu $15,$21 +#else + dsrl $21,$16,7 # Xupdate(23) + daddu $15,$8 # +=X[i+9] + dsll $20,$16,56 + dsrl $19,$16,1 + xor $21,$20 + dsll $20,7 + xor $21,$19 + dsrl $19,$16,8 + xor $21,$20 + + dsrl $22,$13,6 + xor $21,$19 # sigma0(X[i+1]) + dsll $20,$13,3 + daddu $15,$21 + dsrl $19,$13,19 + xor $22,$20 + dsll $20,42 + xor $22,$19 + dsrl $19,$13,61 + xor $22,$20 +#endif + xor $22,$19 # sigma1(X[i+14]) + daddu $15,$22 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 23 + drotr $20,$25,14 + daddu $19,$15,$1 + drotr $21,$25,18 + and $22,$25 + drotr $1,$25,41 + xor $20,$21 + drotr $21,$2,28 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $19,$22 + ld $22,184($6) # K[23] + xor $1,$21 + drotr $21,$2,39 + daddu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + daddu $19,$15,$1 # 23 + dsrl $1,$25,14 + xor $22,$30,$31 + dsll $21,$25,23 + and $22,$25 + dsrl $20,$25,18 + xor $1,$21 + dsll $21,$25,46 + xor $1,$20 + dsrl $20,$25,41 + xor $1,$21 + dsll $21,$25,50 + xor $1,$20 + xor 
$22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $19,$22 + ld $22,184($6) # K[23] + dsll $21,$2,25 + daddu $19,$20 + dsrl $20,$2,34 + xor $1,$21 + dsll $21,$2,30 + xor $1,$20 + dsrl $20,$2,39 + xor $1,$21 + dsll $21,$2,36 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sd $15,56($29) # offload to ring buffer + daddu $1,$20 + and $21,$2 + daddu $19,$22 # +=K[23] + daddu $1,$21 # +=Maj(a,b,c) + daddu $24,$19 + daddu $1,$19 + ld $18,80($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $22,$17,7 # Xupdate(24) + drotr $20,$17,1 + daddu $16,$9 # +=X[i+9] + xor $22,$20 + drotr $20,$17,8 + + dsrl $23,$14,6 + drotr $21,$14,19 + xor $22,$20 # sigma0(X[i+1]) + drotr $20,$14,61 + xor $23,$21 + daddu $16,$22 +#else + dsrl $22,$17,7 # Xupdate(24) + daddu $16,$9 # +=X[i+9] + dsll $21,$17,56 + dsrl $20,$17,1 + xor $22,$21 + dsll $21,7 + xor $22,$20 + dsrl $20,$17,8 + xor $22,$21 + + dsrl $23,$14,6 + xor $22,$20 # sigma0(X[i+1]) + dsll $21,$14,3 + daddu $16,$22 + dsrl $20,$14,19 + xor $23,$21 + dsll $21,42 + xor $23,$20 + dsrl $20,$14,61 + xor $23,$21 +#endif + xor $23,$20 # sigma1(X[i+14]) + daddu $16,$23 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 24 + drotr $21,$24,14 + daddu $20,$16,$31 + drotr $22,$24,18 + and $23,$24 + drotr $31,$24,41 + xor $21,$22 + drotr $22,$1,28 + xor $23,$30 # Ch(e,f,g) + xor $21,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $20,$23 + ld $23,192($6) # K[24] + xor $31,$22 + drotr $22,$1,39 + daddu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + daddu $20,$16,$31 # 24 + dsrl $31,$24,14 + xor $23,$25,$30 + dsll $22,$24,23 + and $23,$24 + dsrl $21,$24,18 + xor $31,$22 + dsll $22,$24,46 + xor $31,$21 + dsrl $21,$24,41 + xor $31,$22 + dsll $22,$24,50 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $20,$23 + ld $23,192($6) # K[24] + dsll $22,$1,25 + daddu $20,$21 + dsrl $21,$1,34 + xor $31,$22 + dsll $22,$1,30 + xor $31,$21 + dsrl $21,$1,39 + xor $31,$22 + dsll $22,$1,36 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sd $16,64($29) # offload to ring buffer + daddu $31,$21 + and $22,$1 + daddu $20,$23 # +=K[24] + daddu $31,$22 # +=Maj(a,b,c) + daddu $7,$20 + daddu $31,$20 + ld $19,88($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $23,$18,7 # Xupdate(25) + drotr $21,$18,1 + daddu $17,$10 # +=X[i+9] + xor $23,$21 + drotr $21,$18,8 + + dsrl $8,$15,6 + drotr $22,$15,19 + xor $23,$21 # sigma0(X[i+1]) + drotr $21,$15,61 + xor $8,$22 + daddu $17,$23 +#else + dsrl $23,$18,7 # Xupdate(25) + daddu $17,$10 # +=X[i+9] + dsll $22,$18,56 + dsrl $21,$18,1 + xor $23,$22 + dsll $22,7 + xor $23,$21 + dsrl $21,$18,8 + xor $23,$22 + + dsrl $8,$15,6 + xor $23,$21 # sigma0(X[i+1]) + dsll $22,$15,3 + daddu $17,$23 + dsrl $21,$15,19 + xor $8,$22 + dsll $22,42 + xor $8,$21 + dsrl $21,$15,61 + xor $8,$22 +#endif + xor $8,$21 # sigma1(X[i+14]) + daddu $17,$8 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 25 + drotr $22,$7,14 + daddu $21,$17,$30 + drotr $23,$7,18 + and $8,$7 + drotr $30,$7,41 + xor $22,$23 + drotr $23,$31,28 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $21,$8 + ld $8,200($6) # K[25] + xor $30,$23 + drotr $23,$31,39 + daddu $21,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + daddu 
$21,$17,$30 # 25 + dsrl $30,$7,14 + xor $8,$24,$25 + dsll $23,$7,23 + and $8,$7 + dsrl $22,$7,18 + xor $30,$23 + dsll $23,$7,46 + xor $30,$22 + dsrl $22,$7,41 + xor $30,$23 + dsll $23,$7,50 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $21,$8 + ld $8,200($6) # K[25] + dsll $23,$31,25 + daddu $21,$22 + dsrl $22,$31,34 + xor $30,$23 + dsll $23,$31,30 + xor $30,$22 + dsrl $22,$31,39 + xor $30,$23 + dsll $23,$31,36 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sd $17,72($29) # offload to ring buffer + daddu $30,$22 + and $23,$31 + daddu $21,$8 # +=K[25] + daddu $30,$23 # +=Maj(a,b,c) + daddu $3,$21 + daddu $30,$21 + ld $20,96($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $8,$19,7 # Xupdate(26) + drotr $22,$19,1 + daddu $18,$11 # +=X[i+9] + xor $8,$22 + drotr $22,$19,8 + + dsrl $9,$16,6 + drotr $23,$16,19 + xor $8,$22 # sigma0(X[i+1]) + drotr $22,$16,61 + xor $9,$23 + daddu $18,$8 +#else + dsrl $8,$19,7 # Xupdate(26) + daddu $18,$11 # +=X[i+9] + dsll $23,$19,56 + dsrl $22,$19,1 + xor $8,$23 + dsll $23,7 + xor $8,$22 + dsrl $22,$19,8 + xor $8,$23 + + dsrl $9,$16,6 + xor $8,$22 # sigma0(X[i+1]) + dsll $23,$16,3 + daddu $18,$8 + dsrl $22,$16,19 + xor $9,$23 + dsll $23,42 + xor $9,$22 + dsrl $22,$16,61 + xor $9,$23 +#endif + xor $9,$22 # sigma1(X[i+14]) + daddu $18,$9 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 26 + drotr $23,$3,14 + daddu $22,$18,$25 + drotr $8,$3,18 + and $9,$3 + drotr $25,$3,41 + xor $23,$8 + drotr $8,$30,28 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $22,$9 + ld $9,208($6) # K[26] + xor $25,$8 + drotr $8,$30,39 + daddu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + daddu $22,$18,$25 # 26 + dsrl $25,$3,14 + xor $9,$7,$24 + dsll $8,$3,23 + and $9,$3 + dsrl $23,$3,18 + xor $25,$8 + dsll $8,$3,46 + xor $25,$23 + dsrl $23,$3,41 + xor $25,$8 + dsll $8,$3,50 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $22,$9 + ld $9,208($6) # K[26] + dsll $8,$30,25 + daddu $22,$23 + dsrl $23,$30,34 + xor $25,$8 + dsll $8,$30,30 + xor $25,$23 + dsrl $23,$30,39 + xor $25,$8 + dsll $8,$30,36 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sd $18,80($29) # offload to ring buffer + daddu $25,$23 + and $8,$30 + daddu $22,$9 # +=K[26] + daddu $25,$8 # +=Maj(a,b,c) + daddu $2,$22 + daddu $25,$22 + ld $21,104($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $9,$20,7 # Xupdate(27) + drotr $23,$20,1 + daddu $19,$12 # +=X[i+9] + xor $9,$23 + drotr $23,$20,8 + + dsrl $10,$17,6 + drotr $8,$17,19 + xor $9,$23 # sigma0(X[i+1]) + drotr $23,$17,61 + xor $10,$8 + daddu $19,$9 +#else + dsrl $9,$20,7 # Xupdate(27) + daddu $19,$12 # +=X[i+9] + dsll $8,$20,56 + dsrl $23,$20,1 + xor $9,$8 + dsll $8,7 + xor $9,$23 + dsrl $23,$20,8 + xor $9,$8 + + dsrl $10,$17,6 + xor $9,$23 # sigma0(X[i+1]) + dsll $8,$17,3 + daddu $19,$9 + dsrl $23,$17,19 + xor $10,$8 + dsll $8,42 + xor $10,$23 + dsrl $23,$17,61 + xor $10,$8 +#endif + xor $10,$23 # sigma1(X[i+14]) + daddu $19,$10 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 27 + drotr $8,$2,14 + daddu $23,$19,$24 + drotr $9,$2,18 + and $10,$2 + drotr $24,$2,41 + xor $8,$9 + drotr $9,$25,28 + xor $10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu 
$23,$10 + ld $10,216($6) # K[27] + xor $24,$9 + drotr $9,$25,39 + daddu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + daddu $23,$19,$24 # 27 + dsrl $24,$2,14 + xor $10,$3,$7 + dsll $9,$2,23 + and $10,$2 + dsrl $8,$2,18 + xor $24,$9 + dsll $9,$2,46 + xor $24,$8 + dsrl $8,$2,41 + xor $24,$9 + dsll $9,$2,50 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $23,$10 + ld $10,216($6) # K[27] + dsll $9,$25,25 + daddu $23,$8 + dsrl $8,$25,34 + xor $24,$9 + dsll $9,$25,30 + xor $24,$8 + dsrl $8,$25,39 + xor $24,$9 + dsll $9,$25,36 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sd $19,88($29) # offload to ring buffer + daddu $24,$8 + and $9,$25 + daddu $23,$10 # +=K[27] + daddu $24,$9 # +=Maj(a,b,c) + daddu $1,$23 + daddu $24,$23 + ld $22,112($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $10,$21,7 # Xupdate(28) + drotr $8,$21,1 + daddu $20,$13 # +=X[i+9] + xor $10,$8 + drotr $8,$21,8 + + dsrl $11,$18,6 + drotr $9,$18,19 + xor $10,$8 # sigma0(X[i+1]) + drotr $8,$18,61 + xor $11,$9 + daddu $20,$10 +#else + dsrl $10,$21,7 # Xupdate(28) + daddu $20,$13 # +=X[i+9] + dsll $9,$21,56 + dsrl $8,$21,1 + xor $10,$9 + dsll $9,7 + xor $10,$8 + dsrl $8,$21,8 + xor $10,$9 + + dsrl $11,$18,6 + xor $10,$8 # sigma0(X[i+1]) + dsll $9,$18,3 + daddu $20,$10 + dsrl $8,$18,19 + xor $11,$9 + dsll $9,42 + xor $11,$8 + dsrl $8,$18,61 + xor $11,$9 +#endif + xor $11,$8 # sigma1(X[i+14]) + daddu $20,$11 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 28 + drotr $9,$1,14 + daddu $8,$20,$7 + drotr $10,$1,18 + and $11,$1 + drotr $7,$1,41 + xor $9,$10 + drotr $10,$24,28 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $8,$11 + ld $11,224($6) # K[28] + xor $7,$10 + drotr $10,$24,39 + daddu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + daddu $8,$20,$7 # 28 + dsrl $7,$1,14 + xor $11,$2,$3 + dsll $10,$1,23 + and $11,$1 + dsrl $9,$1,18 + xor $7,$10 + dsll $10,$1,46 + xor $7,$9 + dsrl $9,$1,41 + xor $7,$10 + dsll $10,$1,50 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $8,$11 + ld $11,224($6) # K[28] + dsll $10,$24,25 + daddu $8,$9 + dsrl $9,$24,34 + xor $7,$10 + dsll $10,$24,30 + xor $7,$9 + dsrl $9,$24,39 + xor $7,$10 + dsll $10,$24,36 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sd $20,96($29) # offload to ring buffer + daddu $7,$9 + and $10,$24 + daddu $8,$11 # +=K[28] + daddu $7,$10 # +=Maj(a,b,c) + daddu $31,$8 + daddu $7,$8 + ld $23,120($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $11,$22,7 # Xupdate(29) + drotr $9,$22,1 + daddu $21,$14 # +=X[i+9] + xor $11,$9 + drotr $9,$22,8 + + dsrl $12,$19,6 + drotr $10,$19,19 + xor $11,$9 # sigma0(X[i+1]) + drotr $9,$19,61 + xor $12,$10 + daddu $21,$11 +#else + dsrl $11,$22,7 # Xupdate(29) + daddu $21,$14 # +=X[i+9] + dsll $10,$22,56 + dsrl $9,$22,1 + xor $11,$10 + dsll $10,7 + xor $11,$9 + dsrl $9,$22,8 + xor $11,$10 + + dsrl $12,$19,6 + xor $11,$9 # sigma0(X[i+1]) + dsll $10,$19,3 + daddu $21,$11 + dsrl $9,$19,19 + xor $12,$10 + dsll $10,42 + xor $12,$9 + dsrl $9,$19,61 + xor $12,$10 +#endif + xor $12,$9 # sigma1(X[i+14]) + daddu $21,$12 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 29 + drotr $10,$31,14 + daddu $9,$21,$3 + drotr $11,$31,18 + and $12,$31 + 
drotr $3,$31,41 + xor $10,$11 + drotr $11,$7,28 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $9,$12 + ld $12,232($6) # K[29] + xor $3,$11 + drotr $11,$7,39 + daddu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + daddu $9,$21,$3 # 29 + dsrl $3,$31,14 + xor $12,$1,$2 + dsll $11,$31,23 + and $12,$31 + dsrl $10,$31,18 + xor $3,$11 + dsll $11,$31,46 + xor $3,$10 + dsrl $10,$31,41 + xor $3,$11 + dsll $11,$31,50 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $9,$12 + ld $12,232($6) # K[29] + dsll $11,$7,25 + daddu $9,$10 + dsrl $10,$7,34 + xor $3,$11 + dsll $11,$7,30 + xor $3,$10 + dsrl $10,$7,39 + xor $3,$11 + dsll $11,$7,36 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sd $21,104($29) # offload to ring buffer + daddu $3,$10 + and $11,$7 + daddu $9,$12 # +=K[29] + daddu $3,$11 # +=Maj(a,b,c) + daddu $30,$9 + daddu $3,$9 + ld $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $12,$23,7 # Xupdate(30) + drotr $10,$23,1 + daddu $22,$15 # +=X[i+9] + xor $12,$10 + drotr $10,$23,8 + + dsrl $13,$20,6 + drotr $11,$20,19 + xor $12,$10 # sigma0(X[i+1]) + drotr $10,$20,61 + xor $13,$11 + daddu $22,$12 +#else + dsrl $12,$23,7 # Xupdate(30) + daddu $22,$15 # +=X[i+9] + dsll $11,$23,56 + dsrl $10,$23,1 + xor $12,$11 + dsll $11,7 + xor $12,$10 + dsrl $10,$23,8 + xor $12,$11 + + dsrl $13,$20,6 + xor $12,$10 # sigma0(X[i+1]) + dsll $11,$20,3 + daddu $22,$12 + dsrl $10,$20,19 + xor $13,$11 + dsll $11,42 + xor $13,$10 + dsrl $10,$20,61 + xor $13,$11 +#endif + xor $13,$10 # sigma1(X[i+14]) + daddu $22,$13 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 30 + drotr $11,$30,14 + daddu $10,$22,$2 + drotr $12,$30,18 + and $13,$30 + drotr $2,$30,41 + xor $11,$12 + drotr $12,$3,28 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $10,$13 + ld $13,240($6) # K[30] + xor $2,$12 + drotr $12,$3,39 + daddu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + daddu $10,$22,$2 # 30 + dsrl $2,$30,14 + xor $13,$31,$1 + dsll $12,$30,23 + and $13,$30 + dsrl $11,$30,18 + xor $2,$12 + dsll $12,$30,46 + xor $2,$11 + dsrl $11,$30,41 + xor $2,$12 + dsll $12,$30,50 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $10,$13 + ld $13,240($6) # K[30] + dsll $12,$3,25 + daddu $10,$11 + dsrl $11,$3,34 + xor $2,$12 + dsll $12,$3,30 + xor $2,$11 + dsrl $11,$3,39 + xor $2,$12 + dsll $12,$3,36 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sd $22,112($29) # offload to ring buffer + daddu $2,$11 + and $12,$3 + daddu $10,$13 # +=K[30] + daddu $2,$12 # +=Maj(a,b,c) + daddu $25,$10 + daddu $2,$10 + ld $9,8($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $13,$8,7 # Xupdate(31) + drotr $11,$8,1 + daddu $23,$16 # +=X[i+9] + xor $13,$11 + drotr $11,$8,8 + + dsrl $14,$21,6 + drotr $12,$21,19 + xor $13,$11 # sigma0(X[i+1]) + drotr $11,$21,61 + xor $14,$12 + daddu $23,$13 +#else + dsrl $13,$8,7 # Xupdate(31) + daddu $23,$16 # +=X[i+9] + dsll $12,$8,56 + dsrl $11,$8,1 + xor $13,$12 + dsll $12,7 + xor $13,$11 + dsrl $11,$8,8 + xor $13,$12 + + dsrl $14,$21,6 + xor $13,$11 # sigma0(X[i+1]) + dsll $12,$21,3 + daddu $23,$13 + dsrl $11,$21,19 + xor $14,$12 + dsll $12,42 + xor $14,$11 + dsrl $11,$21,61 + xor $14,$12 +#endif + xor $14,$11 # 
sigma1(X[i+14]) + daddu $23,$14 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 31 + drotr $12,$25,14 + daddu $11,$23,$1 + drotr $13,$25,18 + and $14,$25 + drotr $1,$25,41 + xor $12,$13 + drotr $13,$2,28 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $11,$14 + ld $14,248($6) # K[31] + xor $1,$13 + drotr $13,$2,39 + daddu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#else + daddu $11,$23,$1 # 31 + dsrl $1,$25,14 + xor $14,$30,$31 + dsll $13,$25,23 + and $14,$25 + dsrl $12,$25,18 + xor $1,$13 + dsll $13,$25,46 + xor $1,$12 + dsrl $12,$25,41 + xor $1,$13 + dsll $13,$25,50 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $11,$14 + ld $14,248($6) # K[31] + dsll $13,$2,25 + daddu $11,$12 + dsrl $12,$2,34 + xor $1,$13 + dsll $13,$2,30 + xor $1,$12 + dsrl $12,$2,39 + xor $1,$13 + dsll $13,$2,36 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#endif + sd $23,120($29) # offload to ring buffer + daddu $1,$12 + and $13,$2 + daddu $11,$14 # +=K[31] + daddu $1,$13 # +=Maj(a,b,c) + daddu $24,$11 + daddu $1,$11 + ld $10,16($29) # prefetch from ring buffer + and $14,0xfff + li $15,2071 + .set noreorder + bne $14,$15,.L16_xx + daddu $6,16*8 # Ktbl+=16 + + ld $23,16*8($29) # restore pointer to the end of input + ld $8,0*8($4) + ld $9,1*8($4) + ld $10,2*8($4) + daddu $5,16*8 + ld $11,3*8($4) + daddu $1,$8 + ld $12,4*8($4) + daddu $2,$9 + ld $13,5*8($4) + daddu $3,$10 + ld $14,6*8($4) + daddu $7,$11 + ld $15,7*8($4) + daddu $24,$12 + sd $1,0*8($4) + daddu $25,$13 + sd $2,1*8($4) + daddu $30,$14 + sd $3,2*8($4) + daddu $31,$15 + sd $7,3*8($4) + sd $24,4*8($4) + sd $25,5*8($4) + sd $30,6*8($4) + sd $31,7*8($4) + + bne $5,$23,.Loop + dsubu $6,512 # rewind $6 + + ld $31,256-1*8($29) + ld $30,256-2*8($29) + ld $23,256-3*8($29) + ld $22,256-4*8($29) + ld $21,256-5*8($29) + ld $20,256-6*8($29) + ld $19,256-7*8($29) + ld $18,256-8*8($29) + ld $17,256-9*8($29) + ld $16,256-10*8($29) + jr $31 + daddu $29,256 +.end sha512_block_data_order + +.rdata +.align 5 +K512: + .dword 0x428a2f98d728ae22, 0x7137449123ef65cd + .dword 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc + .dword 0x3956c25bf348b538, 0x59f111f1b605d019 + .dword 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 + .dword 0xd807aa98a3030242, 0x12835b0145706fbe + .dword 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 + .dword 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 + .dword 0x9bdc06a725c71235, 0xc19bf174cf692694 + .dword 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 + .dword 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 + .dword 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 + .dword 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 + .dword 0x983e5152ee66dfab, 0xa831c66d2db43210 + .dword 0xb00327c898fb213f, 0xbf597fc7beef0ee4 + .dword 0xc6e00bf33da88fc2, 0xd5a79147930aa725 + .dword 0x06ca6351e003826f, 0x142929670a0e6e70 + .dword 0x27b70a8546d22ffc, 0x2e1b21385c26c926 + .dword 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df + .dword 0x650a73548baf63de, 0x766a0abb3c77b2a8 + .dword 0x81c2c92e47edaee6, 0x92722c851482353b + .dword 0xa2bfe8a14cf10364, 0xa81a664bbc423001 + .dword 0xc24b8b70d0f89791, 0xc76c51a30654be30 + .dword 0xd192e819d6ef5218, 0xd69906245565a910 + .dword 0xf40e35855771202a, 0x106aa07032bbd1b8 + .dword 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 + .dword 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 + .dword 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb + .dword 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 + .dword 0x748f82ee5defb2fc, 0x78a5636f43172f60 + .dword 0x84c87814a1f0ab72, 
0x8cc702081a6439ec + .dword 0x90befffa23631e28, 0xa4506cebde82bde9 + .dword 0xbef9a3f7b2c67915, 0xc67178f2e372532b + .dword 0xca273eceea26619c, 0xd186b8c721c0c207 + .dword 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 + .dword 0x06f067aa72176fba, 0x0a637dc5a2c898a6 + .dword 0x113f9804bef90dae, 0x1b710b35131c471b + .dword 0x28db77f523047d84, 0x32caab7b40c72493 + .dword 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c + .dword 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a + .dword 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 +.asciiz "SHA512 for MIPS, CRYPTOGAMS by <appro@openssl.org>" +.align 5 + diff --git a/deps/openssl/config/archs/linux64-mips64/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux64-mips64/asm_avx2/configdata.pm index f53a77e0a05edb..b012db3a54c06c 100644 --- a/deps/openssl/config/archs/linux64-mips64/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux64-mips64/asm_avx2/configdata.pm @@ -62,7 +62,7 @@ our %config = ( options => "enable-ssl-trace no-afalgeng no-asan no-buildtest-c++ no-comp no-crypto-mdebug no-crypto-mdebug-backtrace no-devcryptoeng no-dynamic-engine no-ec_nistp_64_gcc_128 no-egd no-external-tests no-fuzz-afl no-fuzz-libfuzzer no-heartbeats no-md2 no-msan no-rc5 no-sctp no-shared no-ssl3 no-ssl3-method no-ubsan no-unit-test no-weak-ssl-ciphers no-zlib no-zlib-dynamic", perl_archname => "x86_64-linux-gnu-thread-multi", perl_cmd => "/usr/bin/perl", - perl_version => "5.28.1", + perl_version => "5.26.1", perlargv => [ "no-comp", "no-shared", "no-afalgeng", "enable-ssl-trace", "linux64-mips64" ], perlenv => { "AR" => undef, @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux64-mips64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -15356,3 +15356,4 @@ Verbose output. =back =cut + diff --git a/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/buildinf.h index 87d8f13efafd96..aebc04c835dbd9 100644 --- a/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux64-mips64" -#define DATE "built on: Wed Mar 18 21:09:39 2020 UTC" +#define DATE "built on: Tue Mar 31 13:08:21 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/sha/sha256-mips.S b/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/sha/sha256-mips.S new file mode 100644 index 00000000000000..2906baaf6e155f --- /dev/null +++ b/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/sha/sha256-mips.S @@ -0,0 +1,3037 @@ +#include "mips_arch.h" + +.text +.set noat +#if !defined(__mips_eabi) && (!defined(__vxworks) || defined(__pic__)) +.option pic2 +#endif + +.align 5 +.globl sha256_block_data_order +.ent sha256_block_data_order +sha256_block_data_order: + .frame $29,192,$31 + .mask 0xc0ff0000,-8 + .set noreorder + dsubu $29,192 + sd $31,192-1*8($29) + sd $30,192-2*8($29) + sd $23,192-3*8($29) + sd $22,192-4*8($29) + sd $21,192-5*8($29) + sd $20,192-6*8($29) + sd $19,192-7*8($29) + sd $18,192-8*8($29) + sd $17,192-9*8($29) + sd $16,192-10*8($29) + dsll $23,$6,6 + .cplocal $6 + .cpsetup $25,$0,sha256_block_data_order + .set reorder + dla $6,K256 # PIC-ified 'load address' + + lw $1,0*4($4) # load context + lw $2,1*4($4) + lw $3,2*4($4) + lw $7,3*4($4) + lw $24,4*4($4) + lw $25,5*4($4) + lw $30,6*4($4) + lw $31,7*4($4) + + daddu $23,$5 # pointer to the end of input + sd $23,16*4($29) + b .Loop + +.align 5 +.Loop: +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $8,($5) +#else + lwl $8,3($5) + lwr $8,0($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $9,4($5) +#else + lwl $9,7($5) + lwr $9,4($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $8,$8 # byte swap(0) + rotr $8,$8,16 +#else + srl $13,$8,24 # byte swap(0) + srl $14,$8,8 + andi $15,$8,0xFF00 + sll $8,$8,24 + andi $14,0xFF00 + sll $15,$15,8 + or $8,$13 + or $14,$15 + or $8,$14 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 0 + rotr $13,$24,6 + addu $12,$8,$31 + rotr $14,$24,11 + and $15,$24 + rotr $31,$24,25 + xor $13,$14 + rotr $14,$1,2 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $12,$15 + lw $15,0($6) # K[0] + xor $31,$14 + rotr $14,$1,22 + addu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#else + addu $12,$8,$31 # 0 + srl $31,$24,6 + xor $15,$25,$30 + sll $14,$24,7 + and $15,$24 + srl $13,$24,11 + xor $31,$14 + sll $14,$24,21 + xor $31,$13 + srl $13,$24,25 + xor $31,$14 + sll $14,$24,26 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + srl $31,$1,2 + addu $12,$15 + lw $15,0($6) # K[0] + sll $14,$1,10 + addu $12,$13 + srl $13,$1,13 + xor $31,$14 + sll $14,$1,19 + xor $31,$13 + srl $13,$1,22 + xor $31,$14 + sll $14,$1,30 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#endif + sw $8,0($29) # offload to ring buffer + addu $31,$13 + and $14,$1 + addu $12,$15 # +=K[0] + addu $31,$14 # +=Maj(a,b,c) + addu $7,$12 + addu $31,$12 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $10,8($5) +#else + lwl $10,11($5) + lwr $10,8($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $9,$9 # byte swap(1) + rotr $9,$9,16 +#else + srl $14,$9,24 # byte swap(1) + srl $15,$9,8 + andi $16,$9,0xFF00 + sll $9,$9,24 + andi $15,0xFF00 + sll $16,$16,8 + or $9,$14 + or $15,$16 + or $9,$15 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 1 + rotr $14,$7,6 + addu $13,$9,$30 + rotr $15,$7,11 + and $16,$7 + rotr $30,$7,25 + xor $14,$15 + rotr $15,$31,2 + xor $16,$25 # Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + 
rotr $30,$31,13 + addu $13,$16 + lw $16,4($6) # K[1] + xor $30,$15 + rotr $15,$31,22 + addu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + addu $13,$9,$30 # 1 + srl $30,$7,6 + xor $16,$24,$25 + sll $15,$7,7 + and $16,$7 + srl $14,$7,11 + xor $30,$15 + sll $15,$7,21 + xor $30,$14 + srl $14,$7,25 + xor $30,$15 + sll $15,$7,26 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + srl $30,$31,2 + addu $13,$16 + lw $16,4($6) # K[1] + sll $15,$31,10 + addu $13,$14 + srl $14,$31,13 + xor $30,$15 + sll $15,$31,19 + xor $30,$14 + srl $14,$31,22 + xor $30,$15 + sll $15,$31,30 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sw $9,4($29) # offload to ring buffer + addu $30,$14 + and $15,$31 + addu $13,$16 # +=K[1] + addu $30,$15 # +=Maj(a,b,c) + addu $3,$13 + addu $30,$13 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $11,12($5) +#else + lwl $11,15($5) + lwr $11,12($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $10,$10 # byte swap(2) + rotr $10,$10,16 +#else + srl $15,$10,24 # byte swap(2) + srl $16,$10,8 + andi $17,$10,0xFF00 + sll $10,$10,24 + andi $16,0xFF00 + sll $17,$17,8 + or $10,$15 + or $16,$17 + or $10,$16 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 2 + rotr $15,$3,6 + addu $14,$10,$25 + rotr $16,$3,11 + and $17,$3 + rotr $25,$3,25 + xor $15,$16 + rotr $16,$30,2 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $14,$17 + lw $17,8($6) # K[2] + xor $25,$16 + rotr $16,$30,22 + addu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + addu $14,$10,$25 # 2 + srl $25,$3,6 + xor $17,$7,$24 + sll $16,$3,7 + and $17,$3 + srl $15,$3,11 + xor $25,$16 + sll $16,$3,21 + xor $25,$15 + srl $15,$3,25 + xor $25,$16 + sll $16,$3,26 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + srl $25,$30,2 + addu $14,$17 + lw $17,8($6) # K[2] + sll $16,$30,10 + addu $14,$15 + srl $15,$30,13 + xor $25,$16 + sll $16,$30,19 + xor $25,$15 + srl $15,$30,22 + xor $25,$16 + sll $16,$30,30 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sw $10,8($29) # offload to ring buffer + addu $25,$15 + and $16,$30 + addu $14,$17 # +=K[2] + addu $25,$16 # +=Maj(a,b,c) + addu $2,$14 + addu $25,$14 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $12,16($5) +#else + lwl $12,19($5) + lwr $12,16($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $11,$11 # byte swap(3) + rotr $11,$11,16 +#else + srl $16,$11,24 # byte swap(3) + srl $17,$11,8 + andi $18,$11,0xFF00 + sll $11,$11,24 + andi $17,0xFF00 + sll $18,$18,8 + or $11,$16 + or $17,$18 + or $11,$17 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 3 + rotr $16,$2,6 + addu $15,$11,$24 + rotr $17,$2,11 + and $18,$2 + rotr $24,$2,25 + xor $16,$17 + rotr $17,$25,2 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $15,$18 + lw $18,12($6) # K[3] + xor $24,$17 + rotr $17,$25,22 + addu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + addu $15,$11,$24 # 3 + srl $24,$2,6 + xor $18,$3,$7 + sll $17,$2,7 + and $18,$2 + srl $16,$2,11 + xor $24,$17 + sll $17,$2,21 + xor $24,$16 + srl $16,$2,25 + xor $24,$17 + sll $17,$2,26 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + srl $24,$25,2 + addu $15,$18 + lw $18,12($6) # K[3] + sll 
$17,$25,10 + addu $15,$16 + srl $16,$25,13 + xor $24,$17 + sll $17,$25,19 + xor $24,$16 + srl $16,$25,22 + xor $24,$17 + sll $17,$25,30 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sw $11,12($29) # offload to ring buffer + addu $24,$16 + and $17,$25 + addu $15,$18 # +=K[3] + addu $24,$17 # +=Maj(a,b,c) + addu $1,$15 + addu $24,$15 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $13,20($5) +#else + lwl $13,23($5) + lwr $13,20($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $12,$12 # byte swap(4) + rotr $12,$12,16 +#else + srl $17,$12,24 # byte swap(4) + srl $18,$12,8 + andi $19,$12,0xFF00 + sll $12,$12,24 + andi $18,0xFF00 + sll $19,$19,8 + or $12,$17 + or $18,$19 + or $12,$18 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 4 + rotr $17,$1,6 + addu $16,$12,$7 + rotr $18,$1,11 + and $19,$1 + rotr $7,$1,25 + xor $17,$18 + rotr $18,$24,2 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $16,$19 + lw $19,16($6) # K[4] + xor $7,$18 + rotr $18,$24,22 + addu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + addu $16,$12,$7 # 4 + srl $7,$1,6 + xor $19,$2,$3 + sll $18,$1,7 + and $19,$1 + srl $17,$1,11 + xor $7,$18 + sll $18,$1,21 + xor $7,$17 + srl $17,$1,25 + xor $7,$18 + sll $18,$1,26 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + srl $7,$24,2 + addu $16,$19 + lw $19,16($6) # K[4] + sll $18,$24,10 + addu $16,$17 + srl $17,$24,13 + xor $7,$18 + sll $18,$24,19 + xor $7,$17 + srl $17,$24,22 + xor $7,$18 + sll $18,$24,30 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sw $12,16($29) # offload to ring buffer + addu $7,$17 + and $18,$24 + addu $16,$19 # +=K[4] + addu $7,$18 # +=Maj(a,b,c) + addu $31,$16 + addu $7,$16 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $14,24($5) +#else + lwl $14,27($5) + lwr $14,24($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $13,$13 # byte swap(5) + rotr $13,$13,16 +#else + srl $18,$13,24 # byte swap(5) + srl $19,$13,8 + andi $20,$13,0xFF00 + sll $13,$13,24 + andi $19,0xFF00 + sll $20,$20,8 + or $13,$18 + or $19,$20 + or $13,$19 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 5 + rotr $18,$31,6 + addu $17,$13,$3 + rotr $19,$31,11 + and $20,$31 + rotr $3,$31,25 + xor $18,$19 + rotr $19,$7,2 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $17,$20 + lw $20,20($6) # K[5] + xor $3,$19 + rotr $19,$7,22 + addu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + addu $17,$13,$3 # 5 + srl $3,$31,6 + xor $20,$1,$2 + sll $19,$31,7 + and $20,$31 + srl $18,$31,11 + xor $3,$19 + sll $19,$31,21 + xor $3,$18 + srl $18,$31,25 + xor $3,$19 + sll $19,$31,26 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + srl $3,$7,2 + addu $17,$20 + lw $20,20($6) # K[5] + sll $19,$7,10 + addu $17,$18 + srl $18,$7,13 + xor $3,$19 + sll $19,$7,19 + xor $3,$18 + srl $18,$7,22 + xor $3,$19 + sll $19,$7,30 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sw $13,20($29) # offload to ring buffer + addu $3,$18 + and $19,$7 + addu $17,$20 # +=K[5] + addu $3,$19 # +=Maj(a,b,c) + addu $30,$17 + addu $3,$17 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $15,28($5) +#else + lwl $15,31($5) + lwr $15,28($5) +#endif +#if 
defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $14,$14 # byte swap(6) + rotr $14,$14,16 +#else + srl $19,$14,24 # byte swap(6) + srl $20,$14,8 + andi $21,$14,0xFF00 + sll $14,$14,24 + andi $20,0xFF00 + sll $21,$21,8 + or $14,$19 + or $20,$21 + or $14,$20 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 6 + rotr $19,$30,6 + addu $18,$14,$2 + rotr $20,$30,11 + and $21,$30 + rotr $2,$30,25 + xor $19,$20 + rotr $20,$3,2 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $18,$21 + lw $21,24($6) # K[6] + xor $2,$20 + rotr $20,$3,22 + addu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + addu $18,$14,$2 # 6 + srl $2,$30,6 + xor $21,$31,$1 + sll $20,$30,7 + and $21,$30 + srl $19,$30,11 + xor $2,$20 + sll $20,$30,21 + xor $2,$19 + srl $19,$30,25 + xor $2,$20 + sll $20,$30,26 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + srl $2,$3,2 + addu $18,$21 + lw $21,24($6) # K[6] + sll $20,$3,10 + addu $18,$19 + srl $19,$3,13 + xor $2,$20 + sll $20,$3,19 + xor $2,$19 + srl $19,$3,22 + xor $2,$20 + sll $20,$3,30 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sw $14,24($29) # offload to ring buffer + addu $2,$19 + and $20,$3 + addu $18,$21 # +=K[6] + addu $2,$20 # +=Maj(a,b,c) + addu $25,$18 + addu $2,$18 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $16,32($5) +#else + lwl $16,35($5) + lwr $16,32($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $15,$15 # byte swap(7) + rotr $15,$15,16 +#else + srl $20,$15,24 # byte swap(7) + srl $21,$15,8 + andi $22,$15,0xFF00 + sll $15,$15,24 + andi $21,0xFF00 + sll $22,$22,8 + or $15,$20 + or $21,$22 + or $15,$21 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 7 + rotr $20,$25,6 + addu $19,$15,$1 + rotr $21,$25,11 + and $22,$25 + rotr $1,$25,25 + xor $20,$21 + rotr $21,$2,2 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $19,$22 + lw $22,28($6) # K[7] + xor $1,$21 + rotr $21,$2,22 + addu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + addu $19,$15,$1 # 7 + srl $1,$25,6 + xor $22,$30,$31 + sll $21,$25,7 + and $22,$25 + srl $20,$25,11 + xor $1,$21 + sll $21,$25,21 + xor $1,$20 + srl $20,$25,25 + xor $1,$21 + sll $21,$25,26 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + srl $1,$2,2 + addu $19,$22 + lw $22,28($6) # K[7] + sll $21,$2,10 + addu $19,$20 + srl $20,$2,13 + xor $1,$21 + sll $21,$2,19 + xor $1,$20 + srl $20,$2,22 + xor $1,$21 + sll $21,$2,30 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sw $15,28($29) # offload to ring buffer + addu $1,$20 + and $21,$2 + addu $19,$22 # +=K[7] + addu $1,$21 # +=Maj(a,b,c) + addu $24,$19 + addu $1,$19 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $17,36($5) +#else + lwl $17,39($5) + lwr $17,36($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $16,$16 # byte swap(8) + rotr $16,$16,16 +#else + srl $21,$16,24 # byte swap(8) + srl $22,$16,8 + andi $23,$16,0xFF00 + sll $16,$16,24 + andi $22,0xFF00 + sll $23,$23,8 + or $16,$21 + or $22,$23 + or $16,$22 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 8 + rotr $21,$24,6 + addu $20,$16,$31 + rotr $22,$24,11 + and $23,$24 + rotr $31,$24,25 + xor $21,$22 + rotr $22,$1,2 + xor $23,$30 # Ch(e,f,g) + xor 
$21,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $20,$23 + lw $23,32($6) # K[8] + xor $31,$22 + rotr $22,$1,22 + addu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + addu $20,$16,$31 # 8 + srl $31,$24,6 + xor $23,$25,$30 + sll $22,$24,7 + and $23,$24 + srl $21,$24,11 + xor $31,$22 + sll $22,$24,21 + xor $31,$21 + srl $21,$24,25 + xor $31,$22 + sll $22,$24,26 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + srl $31,$1,2 + addu $20,$23 + lw $23,32($6) # K[8] + sll $22,$1,10 + addu $20,$21 + srl $21,$1,13 + xor $31,$22 + sll $22,$1,19 + xor $31,$21 + srl $21,$1,22 + xor $31,$22 + sll $22,$1,30 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sw $16,32($29) # offload to ring buffer + addu $31,$21 + and $22,$1 + addu $20,$23 # +=K[8] + addu $31,$22 # +=Maj(a,b,c) + addu $7,$20 + addu $31,$20 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $18,40($5) +#else + lwl $18,43($5) + lwr $18,40($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $17,$17 # byte swap(9) + rotr $17,$17,16 +#else + srl $22,$17,24 # byte swap(9) + srl $23,$17,8 + andi $8,$17,0xFF00 + sll $17,$17,24 + andi $23,0xFF00 + sll $8,$8,8 + or $17,$22 + or $23,$8 + or $17,$23 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 9 + rotr $22,$7,6 + addu $21,$17,$30 + rotr $23,$7,11 + and $8,$7 + rotr $30,$7,25 + xor $22,$23 + rotr $23,$31,2 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + rotr $30,$31,13 + addu $21,$8 + lw $8,36($6) # K[9] + xor $30,$23 + rotr $23,$31,22 + addu $21,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + addu $21,$17,$30 # 9 + srl $30,$7,6 + xor $8,$24,$25 + sll $23,$7,7 + and $8,$7 + srl $22,$7,11 + xor $30,$23 + sll $23,$7,21 + xor $30,$22 + srl $22,$7,25 + xor $30,$23 + sll $23,$7,26 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + srl $30,$31,2 + addu $21,$8 + lw $8,36($6) # K[9] + sll $23,$31,10 + addu $21,$22 + srl $22,$31,13 + xor $30,$23 + sll $23,$31,19 + xor $30,$22 + srl $22,$31,22 + xor $30,$23 + sll $23,$31,30 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sw $17,36($29) # offload to ring buffer + addu $30,$22 + and $23,$31 + addu $21,$8 # +=K[9] + addu $30,$23 # +=Maj(a,b,c) + addu $3,$21 + addu $30,$21 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $19,44($5) +#else + lwl $19,47($5) + lwr $19,44($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $18,$18 # byte swap(10) + rotr $18,$18,16 +#else + srl $23,$18,24 # byte swap(10) + srl $8,$18,8 + andi $9,$18,0xFF00 + sll $18,$18,24 + andi $8,0xFF00 + sll $9,$9,8 + or $18,$23 + or $8,$9 + or $18,$8 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 10 + rotr $23,$3,6 + addu $22,$18,$25 + rotr $8,$3,11 + and $9,$3 + rotr $25,$3,25 + xor $23,$8 + rotr $8,$30,2 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $22,$9 + lw $9,40($6) # K[10] + xor $25,$8 + rotr $8,$30,22 + addu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + addu $22,$18,$25 # 10 + srl $25,$3,6 + xor $9,$7,$24 + sll $8,$3,7 + and $9,$3 + srl $23,$3,11 + xor $25,$8 + sll $8,$3,21 + xor $25,$23 + srl $23,$3,25 + xor $25,$8 + sll $8,$3,26 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + srl $25,$30,2 + addu $22,$9 + lw $9,40($6) # K[10] + sll $8,$30,10 + 
addu $22,$23 + srl $23,$30,13 + xor $25,$8 + sll $8,$30,19 + xor $25,$23 + srl $23,$30,22 + xor $25,$8 + sll $8,$30,30 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sw $18,40($29) # offload to ring buffer + addu $25,$23 + and $8,$30 + addu $22,$9 # +=K[10] + addu $25,$8 # +=Maj(a,b,c) + addu $2,$22 + addu $25,$22 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $20,48($5) +#else + lwl $20,51($5) + lwr $20,48($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $19,$19 # byte swap(11) + rotr $19,$19,16 +#else + srl $8,$19,24 # byte swap(11) + srl $9,$19,8 + andi $10,$19,0xFF00 + sll $19,$19,24 + andi $9,0xFF00 + sll $10,$10,8 + or $19,$8 + or $9,$10 + or $19,$9 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 11 + rotr $8,$2,6 + addu $23,$19,$24 + rotr $9,$2,11 + and $10,$2 + rotr $24,$2,25 + xor $8,$9 + rotr $9,$25,2 + xor $10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $23,$10 + lw $10,44($6) # K[11] + xor $24,$9 + rotr $9,$25,22 + addu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + addu $23,$19,$24 # 11 + srl $24,$2,6 + xor $10,$3,$7 + sll $9,$2,7 + and $10,$2 + srl $8,$2,11 + xor $24,$9 + sll $9,$2,21 + xor $24,$8 + srl $8,$2,25 + xor $24,$9 + sll $9,$2,26 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + srl $24,$25,2 + addu $23,$10 + lw $10,44($6) # K[11] + sll $9,$25,10 + addu $23,$8 + srl $8,$25,13 + xor $24,$9 + sll $9,$25,19 + xor $24,$8 + srl $8,$25,22 + xor $24,$9 + sll $9,$25,30 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sw $19,44($29) # offload to ring buffer + addu $24,$8 + and $9,$25 + addu $23,$10 # +=K[11] + addu $24,$9 # +=Maj(a,b,c) + addu $1,$23 + addu $24,$23 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $21,52($5) +#else + lwl $21,55($5) + lwr $21,52($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $20,$20 # byte swap(12) + rotr $20,$20,16 +#else + srl $9,$20,24 # byte swap(12) + srl $10,$20,8 + andi $11,$20,0xFF00 + sll $20,$20,24 + andi $10,0xFF00 + sll $11,$11,8 + or $20,$9 + or $10,$11 + or $20,$10 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 12 + rotr $9,$1,6 + addu $8,$20,$7 + rotr $10,$1,11 + and $11,$1 + rotr $7,$1,25 + xor $9,$10 + rotr $10,$24,2 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $8,$11 + lw $11,48($6) # K[12] + xor $7,$10 + rotr $10,$24,22 + addu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + addu $8,$20,$7 # 12 + srl $7,$1,6 + xor $11,$2,$3 + sll $10,$1,7 + and $11,$1 + srl $9,$1,11 + xor $7,$10 + sll $10,$1,21 + xor $7,$9 + srl $9,$1,25 + xor $7,$10 + sll $10,$1,26 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + srl $7,$24,2 + addu $8,$11 + lw $11,48($6) # K[12] + sll $10,$24,10 + addu $8,$9 + srl $9,$24,13 + xor $7,$10 + sll $10,$24,19 + xor $7,$9 + srl $9,$24,22 + xor $7,$10 + sll $10,$24,30 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sw $20,48($29) # offload to ring buffer + addu $7,$9 + and $10,$24 + addu $8,$11 # +=K[12] + addu $7,$10 # +=Maj(a,b,c) + addu $31,$8 + addu $7,$8 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $22,56($5) +#else + lwl $22,59($5) + lwr $22,56($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh 
$21,$21 # byte swap(13) + rotr $21,$21,16 +#else + srl $10,$21,24 # byte swap(13) + srl $11,$21,8 + andi $12,$21,0xFF00 + sll $21,$21,24 + andi $11,0xFF00 + sll $12,$12,8 + or $21,$10 + or $11,$12 + or $21,$11 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 13 + rotr $10,$31,6 + addu $9,$21,$3 + rotr $11,$31,11 + and $12,$31 + rotr $3,$31,25 + xor $10,$11 + rotr $11,$7,2 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $9,$12 + lw $12,52($6) # K[13] + xor $3,$11 + rotr $11,$7,22 + addu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + addu $9,$21,$3 # 13 + srl $3,$31,6 + xor $12,$1,$2 + sll $11,$31,7 + and $12,$31 + srl $10,$31,11 + xor $3,$11 + sll $11,$31,21 + xor $3,$10 + srl $10,$31,25 + xor $3,$11 + sll $11,$31,26 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + srl $3,$7,2 + addu $9,$12 + lw $12,52($6) # K[13] + sll $11,$7,10 + addu $9,$10 + srl $10,$7,13 + xor $3,$11 + sll $11,$7,19 + xor $3,$10 + srl $10,$7,22 + xor $3,$11 + sll $11,$7,30 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sw $21,52($29) # offload to ring buffer + addu $3,$10 + and $11,$7 + addu $9,$12 # +=K[13] + addu $3,$11 # +=Maj(a,b,c) + addu $30,$9 + addu $3,$9 + lw $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + lw $23,60($5) +#else + lwl $23,63($5) + lwr $23,60($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $22,$22 # byte swap(14) + rotr $22,$22,16 +#else + srl $11,$22,24 # byte swap(14) + srl $12,$22,8 + andi $13,$22,0xFF00 + sll $22,$22,24 + andi $12,0xFF00 + sll $13,$13,8 + or $22,$11 + or $12,$13 + or $22,$12 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 14 + rotr $11,$30,6 + addu $10,$22,$2 + rotr $12,$30,11 + and $13,$30 + rotr $2,$30,25 + xor $11,$12 + rotr $12,$3,2 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $10,$13 + lw $13,56($6) # K[14] + xor $2,$12 + rotr $12,$3,22 + addu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + addu $10,$22,$2 # 14 + srl $2,$30,6 + xor $13,$31,$1 + sll $12,$30,7 + and $13,$30 + srl $11,$30,11 + xor $2,$12 + sll $12,$30,21 + xor $2,$11 + srl $11,$30,25 + xor $2,$12 + sll $12,$30,26 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + srl $2,$3,2 + addu $10,$13 + lw $13,56($6) # K[14] + sll $12,$3,10 + addu $10,$11 + srl $11,$3,13 + xor $2,$12 + sll $12,$3,19 + xor $2,$11 + srl $11,$3,22 + xor $2,$12 + sll $12,$3,30 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sw $22,56($29) # offload to ring buffer + addu $2,$11 + and $12,$3 + addu $10,$13 # +=K[14] + addu $2,$12 # +=Maj(a,b,c) + addu $25,$10 + addu $2,$10 + lw $9,4($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + wsbh $23,$23 # byte swap(15) + rotr $23,$23,16 +#else + srl $12,$23,24 # byte swap(15) + srl $13,$23,8 + andi $14,$23,0xFF00 + sll $23,$23,24 + andi $13,0xFF00 + sll $14,$14,8 + or $23,$12 + or $13,$14 + or $23,$13 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 15 + rotr $12,$25,6 + addu $11,$23,$1 + rotr $13,$25,11 + and $14,$25 + rotr $1,$25,25 + xor $12,$13 + rotr $13,$2,2 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $11,$14 + lw $14,60($6) # K[15] + xor $1,$13 + rotr $13,$2,22 + 
addu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#else + addu $11,$23,$1 # 15 + srl $1,$25,6 + xor $14,$30,$31 + sll $13,$25,7 + and $14,$25 + srl $12,$25,11 + xor $1,$13 + sll $13,$25,21 + xor $1,$12 + srl $12,$25,25 + xor $1,$13 + sll $13,$25,26 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + srl $1,$2,2 + addu $11,$14 + lw $14,60($6) # K[15] + sll $13,$2,10 + addu $11,$12 + srl $12,$2,13 + xor $1,$13 + sll $13,$2,19 + xor $1,$12 + srl $12,$2,22 + xor $1,$13 + sll $13,$2,30 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#endif + sw $23,60($29) # offload to ring buffer + addu $1,$12 + and $13,$2 + addu $11,$14 # +=K[15] + addu $1,$13 # +=Maj(a,b,c) + addu $24,$11 + addu $1,$11 + lw $10,8($29) # prefetch from ring buffer + b .L16_xx +.align 4 +.L16_xx: +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $14,$9,3 # Xupdate(16) + rotr $12,$9,7 + addu $8,$17 # +=X[i+9] + xor $14,$12 + rotr $12,$9,18 + + srl $15,$22,10 + rotr $13,$22,17 + xor $14,$12 # sigma0(X[i+1]) + rotr $12,$22,19 + xor $15,$13 + addu $8,$14 +#else + srl $14,$9,3 # Xupdate(16) + addu $8,$17 # +=X[i+9] + sll $13,$9,14 + srl $12,$9,7 + xor $14,$13 + sll $13,11 + xor $14,$12 + srl $12,$9,18 + xor $14,$13 + + srl $15,$22,10 + xor $14,$12 # sigma0(X[i+1]) + sll $13,$22,13 + addu $8,$14 + srl $12,$22,17 + xor $15,$13 + sll $13,2 + xor $15,$12 + srl $12,$22,19 + xor $15,$13 +#endif + xor $15,$12 # sigma1(X[i+14]) + addu $8,$15 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 16 + rotr $13,$24,6 + addu $12,$8,$31 + rotr $14,$24,11 + and $15,$24 + rotr $31,$24,25 + xor $13,$14 + rotr $14,$1,2 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $12,$15 + lw $15,64($6) # K[16] + xor $31,$14 + rotr $14,$1,22 + addu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#else + addu $12,$8,$31 # 16 + srl $31,$24,6 + xor $15,$25,$30 + sll $14,$24,7 + and $15,$24 + srl $13,$24,11 + xor $31,$14 + sll $14,$24,21 + xor $31,$13 + srl $13,$24,25 + xor $31,$14 + sll $14,$24,26 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + srl $31,$1,2 + addu $12,$15 + lw $15,64($6) # K[16] + sll $14,$1,10 + addu $12,$13 + srl $13,$1,13 + xor $31,$14 + sll $14,$1,19 + xor $31,$13 + srl $13,$1,22 + xor $31,$14 + sll $14,$1,30 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#endif + sw $8,0($29) # offload to ring buffer + addu $31,$13 + and $14,$1 + addu $12,$15 # +=K[16] + addu $31,$14 # +=Maj(a,b,c) + addu $7,$12 + addu $31,$12 + lw $11,12($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $15,$10,3 # Xupdate(17) + rotr $13,$10,7 + addu $9,$18 # +=X[i+9] + xor $15,$13 + rotr $13,$10,18 + + srl $16,$23,10 + rotr $14,$23,17 + xor $15,$13 # sigma0(X[i+1]) + rotr $13,$23,19 + xor $16,$14 + addu $9,$15 +#else + srl $15,$10,3 # Xupdate(17) + addu $9,$18 # +=X[i+9] + sll $14,$10,14 + srl $13,$10,7 + xor $15,$14 + sll $14,11 + xor $15,$13 + srl $13,$10,18 + xor $15,$14 + + srl $16,$23,10 + xor $15,$13 # sigma0(X[i+1]) + sll $14,$23,13 + addu $9,$15 + srl $13,$23,17 + xor $16,$14 + sll $14,2 + xor $16,$13 + srl $13,$23,19 + xor $16,$14 +#endif + xor $16,$13 # sigma1(X[i+14]) + addu $9,$16 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 17 + rotr $14,$7,6 + addu $13,$9,$30 + rotr $15,$7,11 + and $16,$7 + rotr $30,$7,25 + xor $14,$15 + rotr $15,$31,2 + xor $16,$25 # 
Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + rotr $30,$31,13 + addu $13,$16 + lw $16,68($6) # K[17] + xor $30,$15 + rotr $15,$31,22 + addu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + addu $13,$9,$30 # 17 + srl $30,$7,6 + xor $16,$24,$25 + sll $15,$7,7 + and $16,$7 + srl $14,$7,11 + xor $30,$15 + sll $15,$7,21 + xor $30,$14 + srl $14,$7,25 + xor $30,$15 + sll $15,$7,26 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + srl $30,$31,2 + addu $13,$16 + lw $16,68($6) # K[17] + sll $15,$31,10 + addu $13,$14 + srl $14,$31,13 + xor $30,$15 + sll $15,$31,19 + xor $30,$14 + srl $14,$31,22 + xor $30,$15 + sll $15,$31,30 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sw $9,4($29) # offload to ring buffer + addu $30,$14 + and $15,$31 + addu $13,$16 # +=K[17] + addu $30,$15 # +=Maj(a,b,c) + addu $3,$13 + addu $30,$13 + lw $12,16($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $16,$11,3 # Xupdate(18) + rotr $14,$11,7 + addu $10,$19 # +=X[i+9] + xor $16,$14 + rotr $14,$11,18 + + srl $17,$8,10 + rotr $15,$8,17 + xor $16,$14 # sigma0(X[i+1]) + rotr $14,$8,19 + xor $17,$15 + addu $10,$16 +#else + srl $16,$11,3 # Xupdate(18) + addu $10,$19 # +=X[i+9] + sll $15,$11,14 + srl $14,$11,7 + xor $16,$15 + sll $15,11 + xor $16,$14 + srl $14,$11,18 + xor $16,$15 + + srl $17,$8,10 + xor $16,$14 # sigma0(X[i+1]) + sll $15,$8,13 + addu $10,$16 + srl $14,$8,17 + xor $17,$15 + sll $15,2 + xor $17,$14 + srl $14,$8,19 + xor $17,$15 +#endif + xor $17,$14 # sigma1(X[i+14]) + addu $10,$17 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 18 + rotr $15,$3,6 + addu $14,$10,$25 + rotr $16,$3,11 + and $17,$3 + rotr $25,$3,25 + xor $15,$16 + rotr $16,$30,2 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $14,$17 + lw $17,72($6) # K[18] + xor $25,$16 + rotr $16,$30,22 + addu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + addu $14,$10,$25 # 18 + srl $25,$3,6 + xor $17,$7,$24 + sll $16,$3,7 + and $17,$3 + srl $15,$3,11 + xor $25,$16 + sll $16,$3,21 + xor $25,$15 + srl $15,$3,25 + xor $25,$16 + sll $16,$3,26 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + srl $25,$30,2 + addu $14,$17 + lw $17,72($6) # K[18] + sll $16,$30,10 + addu $14,$15 + srl $15,$30,13 + xor $25,$16 + sll $16,$30,19 + xor $25,$15 + srl $15,$30,22 + xor $25,$16 + sll $16,$30,30 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sw $10,8($29) # offload to ring buffer + addu $25,$15 + and $16,$30 + addu $14,$17 # +=K[18] + addu $25,$16 # +=Maj(a,b,c) + addu $2,$14 + addu $25,$14 + lw $13,20($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $17,$12,3 # Xupdate(19) + rotr $15,$12,7 + addu $11,$20 # +=X[i+9] + xor $17,$15 + rotr $15,$12,18 + + srl $18,$9,10 + rotr $16,$9,17 + xor $17,$15 # sigma0(X[i+1]) + rotr $15,$9,19 + xor $18,$16 + addu $11,$17 +#else + srl $17,$12,3 # Xupdate(19) + addu $11,$20 # +=X[i+9] + sll $16,$12,14 + srl $15,$12,7 + xor $17,$16 + sll $16,11 + xor $17,$15 + srl $15,$12,18 + xor $17,$16 + + srl $18,$9,10 + xor $17,$15 # sigma0(X[i+1]) + sll $16,$9,13 + addu $11,$17 + srl $15,$9,17 + xor $18,$16 + sll $16,2 + xor $18,$15 + srl $15,$9,19 + xor $18,$16 +#endif + xor $18,$15 # sigma1(X[i+14]) + addu $11,$18 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 19 + rotr 
$16,$2,6 + addu $15,$11,$24 + rotr $17,$2,11 + and $18,$2 + rotr $24,$2,25 + xor $16,$17 + rotr $17,$25,2 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $15,$18 + lw $18,76($6) # K[19] + xor $24,$17 + rotr $17,$25,22 + addu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + addu $15,$11,$24 # 19 + srl $24,$2,6 + xor $18,$3,$7 + sll $17,$2,7 + and $18,$2 + srl $16,$2,11 + xor $24,$17 + sll $17,$2,21 + xor $24,$16 + srl $16,$2,25 + xor $24,$17 + sll $17,$2,26 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + srl $24,$25,2 + addu $15,$18 + lw $18,76($6) # K[19] + sll $17,$25,10 + addu $15,$16 + srl $16,$25,13 + xor $24,$17 + sll $17,$25,19 + xor $24,$16 + srl $16,$25,22 + xor $24,$17 + sll $17,$25,30 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sw $11,12($29) # offload to ring buffer + addu $24,$16 + and $17,$25 + addu $15,$18 # +=K[19] + addu $24,$17 # +=Maj(a,b,c) + addu $1,$15 + addu $24,$15 + lw $14,24($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $18,$13,3 # Xupdate(20) + rotr $16,$13,7 + addu $12,$21 # +=X[i+9] + xor $18,$16 + rotr $16,$13,18 + + srl $19,$10,10 + rotr $17,$10,17 + xor $18,$16 # sigma0(X[i+1]) + rotr $16,$10,19 + xor $19,$17 + addu $12,$18 +#else + srl $18,$13,3 # Xupdate(20) + addu $12,$21 # +=X[i+9] + sll $17,$13,14 + srl $16,$13,7 + xor $18,$17 + sll $17,11 + xor $18,$16 + srl $16,$13,18 + xor $18,$17 + + srl $19,$10,10 + xor $18,$16 # sigma0(X[i+1]) + sll $17,$10,13 + addu $12,$18 + srl $16,$10,17 + xor $19,$17 + sll $17,2 + xor $19,$16 + srl $16,$10,19 + xor $19,$17 +#endif + xor $19,$16 # sigma1(X[i+14]) + addu $12,$19 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 20 + rotr $17,$1,6 + addu $16,$12,$7 + rotr $18,$1,11 + and $19,$1 + rotr $7,$1,25 + xor $17,$18 + rotr $18,$24,2 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $16,$19 + lw $19,80($6) # K[20] + xor $7,$18 + rotr $18,$24,22 + addu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + addu $16,$12,$7 # 20 + srl $7,$1,6 + xor $19,$2,$3 + sll $18,$1,7 + and $19,$1 + srl $17,$1,11 + xor $7,$18 + sll $18,$1,21 + xor $7,$17 + srl $17,$1,25 + xor $7,$18 + sll $18,$1,26 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + srl $7,$24,2 + addu $16,$19 + lw $19,80($6) # K[20] + sll $18,$24,10 + addu $16,$17 + srl $17,$24,13 + xor $7,$18 + sll $18,$24,19 + xor $7,$17 + srl $17,$24,22 + xor $7,$18 + sll $18,$24,30 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sw $12,16($29) # offload to ring buffer + addu $7,$17 + and $18,$24 + addu $16,$19 # +=K[20] + addu $7,$18 # +=Maj(a,b,c) + addu $31,$16 + addu $7,$16 + lw $15,28($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $19,$14,3 # Xupdate(21) + rotr $17,$14,7 + addu $13,$22 # +=X[i+9] + xor $19,$17 + rotr $17,$14,18 + + srl $20,$11,10 + rotr $18,$11,17 + xor $19,$17 # sigma0(X[i+1]) + rotr $17,$11,19 + xor $20,$18 + addu $13,$19 +#else + srl $19,$14,3 # Xupdate(21) + addu $13,$22 # +=X[i+9] + sll $18,$14,14 + srl $17,$14,7 + xor $19,$18 + sll $18,11 + xor $19,$17 + srl $17,$14,18 + xor $19,$18 + + srl $20,$11,10 + xor $19,$17 # sigma0(X[i+1]) + sll $18,$11,13 + addu $13,$19 + srl $17,$11,17 + xor $20,$18 + sll $18,2 + xor $20,$17 + srl $17,$11,19 + xor $20,$18 +#endif + xor $20,$17 # 
sigma1(X[i+14]) + addu $13,$20 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 21 + rotr $18,$31,6 + addu $17,$13,$3 + rotr $19,$31,11 + and $20,$31 + rotr $3,$31,25 + xor $18,$19 + rotr $19,$7,2 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $17,$20 + lw $20,84($6) # K[21] + xor $3,$19 + rotr $19,$7,22 + addu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + addu $17,$13,$3 # 21 + srl $3,$31,6 + xor $20,$1,$2 + sll $19,$31,7 + and $20,$31 + srl $18,$31,11 + xor $3,$19 + sll $19,$31,21 + xor $3,$18 + srl $18,$31,25 + xor $3,$19 + sll $19,$31,26 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + srl $3,$7,2 + addu $17,$20 + lw $20,84($6) # K[21] + sll $19,$7,10 + addu $17,$18 + srl $18,$7,13 + xor $3,$19 + sll $19,$7,19 + xor $3,$18 + srl $18,$7,22 + xor $3,$19 + sll $19,$7,30 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sw $13,20($29) # offload to ring buffer + addu $3,$18 + and $19,$7 + addu $17,$20 # +=K[21] + addu $3,$19 # +=Maj(a,b,c) + addu $30,$17 + addu $3,$17 + lw $16,32($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $20,$15,3 # Xupdate(22) + rotr $18,$15,7 + addu $14,$23 # +=X[i+9] + xor $20,$18 + rotr $18,$15,18 + + srl $21,$12,10 + rotr $19,$12,17 + xor $20,$18 # sigma0(X[i+1]) + rotr $18,$12,19 + xor $21,$19 + addu $14,$20 +#else + srl $20,$15,3 # Xupdate(22) + addu $14,$23 # +=X[i+9] + sll $19,$15,14 + srl $18,$15,7 + xor $20,$19 + sll $19,11 + xor $20,$18 + srl $18,$15,18 + xor $20,$19 + + srl $21,$12,10 + xor $20,$18 # sigma0(X[i+1]) + sll $19,$12,13 + addu $14,$20 + srl $18,$12,17 + xor $21,$19 + sll $19,2 + xor $21,$18 + srl $18,$12,19 + xor $21,$19 +#endif + xor $21,$18 # sigma1(X[i+14]) + addu $14,$21 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 22 + rotr $19,$30,6 + addu $18,$14,$2 + rotr $20,$30,11 + and $21,$30 + rotr $2,$30,25 + xor $19,$20 + rotr $20,$3,2 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $18,$21 + lw $21,88($6) # K[22] + xor $2,$20 + rotr $20,$3,22 + addu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + addu $18,$14,$2 # 22 + srl $2,$30,6 + xor $21,$31,$1 + sll $20,$30,7 + and $21,$30 + srl $19,$30,11 + xor $2,$20 + sll $20,$30,21 + xor $2,$19 + srl $19,$30,25 + xor $2,$20 + sll $20,$30,26 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + srl $2,$3,2 + addu $18,$21 + lw $21,88($6) # K[22] + sll $20,$3,10 + addu $18,$19 + srl $19,$3,13 + xor $2,$20 + sll $20,$3,19 + xor $2,$19 + srl $19,$3,22 + xor $2,$20 + sll $20,$3,30 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sw $14,24($29) # offload to ring buffer + addu $2,$19 + and $20,$3 + addu $18,$21 # +=K[22] + addu $2,$20 # +=Maj(a,b,c) + addu $25,$18 + addu $2,$18 + lw $17,36($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $21,$16,3 # Xupdate(23) + rotr $19,$16,7 + addu $15,$8 # +=X[i+9] + xor $21,$19 + rotr $19,$16,18 + + srl $22,$13,10 + rotr $20,$13,17 + xor $21,$19 # sigma0(X[i+1]) + rotr $19,$13,19 + xor $22,$20 + addu $15,$21 +#else + srl $21,$16,3 # Xupdate(23) + addu $15,$8 # +=X[i+9] + sll $20,$16,14 + srl $19,$16,7 + xor $21,$20 + sll $20,11 + xor $21,$19 + srl $19,$16,18 + xor $21,$20 + + srl $22,$13,10 + xor $21,$19 # sigma0(X[i+1]) + sll $20,$13,13 + addu $15,$21 + srl $19,$13,17 + 
xor $22,$20 + sll $20,2 + xor $22,$19 + srl $19,$13,19 + xor $22,$20 +#endif + xor $22,$19 # sigma1(X[i+14]) + addu $15,$22 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 23 + rotr $20,$25,6 + addu $19,$15,$1 + rotr $21,$25,11 + and $22,$25 + rotr $1,$25,25 + xor $20,$21 + rotr $21,$2,2 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $19,$22 + lw $22,92($6) # K[23] + xor $1,$21 + rotr $21,$2,22 + addu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + addu $19,$15,$1 # 23 + srl $1,$25,6 + xor $22,$30,$31 + sll $21,$25,7 + and $22,$25 + srl $20,$25,11 + xor $1,$21 + sll $21,$25,21 + xor $1,$20 + srl $20,$25,25 + xor $1,$21 + sll $21,$25,26 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + srl $1,$2,2 + addu $19,$22 + lw $22,92($6) # K[23] + sll $21,$2,10 + addu $19,$20 + srl $20,$2,13 + xor $1,$21 + sll $21,$2,19 + xor $1,$20 + srl $20,$2,22 + xor $1,$21 + sll $21,$2,30 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sw $15,28($29) # offload to ring buffer + addu $1,$20 + and $21,$2 + addu $19,$22 # +=K[23] + addu $1,$21 # +=Maj(a,b,c) + addu $24,$19 + addu $1,$19 + lw $18,40($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $22,$17,3 # Xupdate(24) + rotr $20,$17,7 + addu $16,$9 # +=X[i+9] + xor $22,$20 + rotr $20,$17,18 + + srl $23,$14,10 + rotr $21,$14,17 + xor $22,$20 # sigma0(X[i+1]) + rotr $20,$14,19 + xor $23,$21 + addu $16,$22 +#else + srl $22,$17,3 # Xupdate(24) + addu $16,$9 # +=X[i+9] + sll $21,$17,14 + srl $20,$17,7 + xor $22,$21 + sll $21,11 + xor $22,$20 + srl $20,$17,18 + xor $22,$21 + + srl $23,$14,10 + xor $22,$20 # sigma0(X[i+1]) + sll $21,$14,13 + addu $16,$22 + srl $20,$14,17 + xor $23,$21 + sll $21,2 + xor $23,$20 + srl $20,$14,19 + xor $23,$21 +#endif + xor $23,$20 # sigma1(X[i+14]) + addu $16,$23 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 24 + rotr $21,$24,6 + addu $20,$16,$31 + rotr $22,$24,11 + and $23,$24 + rotr $31,$24,25 + xor $21,$22 + rotr $22,$1,2 + xor $23,$30 # Ch(e,f,g) + xor $21,$31 # Sigma1(e) + + rotr $31,$1,13 + addu $20,$23 + lw $23,96($6) # K[24] + xor $31,$22 + rotr $22,$1,22 + addu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + addu $20,$16,$31 # 24 + srl $31,$24,6 + xor $23,$25,$30 + sll $22,$24,7 + and $23,$24 + srl $21,$24,11 + xor $31,$22 + sll $22,$24,21 + xor $31,$21 + srl $21,$24,25 + xor $31,$22 + sll $22,$24,26 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + srl $31,$1,2 + addu $20,$23 + lw $23,96($6) # K[24] + sll $22,$1,10 + addu $20,$21 + srl $21,$1,13 + xor $31,$22 + sll $22,$1,19 + xor $31,$21 + srl $21,$1,22 + xor $31,$22 + sll $22,$1,30 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sw $16,32($29) # offload to ring buffer + addu $31,$21 + and $22,$1 + addu $20,$23 # +=K[24] + addu $31,$22 # +=Maj(a,b,c) + addu $7,$20 + addu $31,$20 + lw $19,44($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $23,$18,3 # Xupdate(25) + rotr $21,$18,7 + addu $17,$10 # +=X[i+9] + xor $23,$21 + rotr $21,$18,18 + + srl $8,$15,10 + rotr $22,$15,17 + xor $23,$21 # sigma0(X[i+1]) + rotr $21,$15,19 + xor $8,$22 + addu $17,$23 +#else + srl $23,$18,3 # Xupdate(25) + addu $17,$10 # +=X[i+9] + sll $22,$18,14 + srl $21,$18,7 + xor $23,$22 + sll $22,11 + xor $23,$21 + srl $21,$18,18 + 
xor $23,$22 + + srl $8,$15,10 + xor $23,$21 # sigma0(X[i+1]) + sll $22,$15,13 + addu $17,$23 + srl $21,$15,17 + xor $8,$22 + sll $22,2 + xor $8,$21 + srl $21,$15,19 + xor $8,$22 +#endif + xor $8,$21 # sigma1(X[i+14]) + addu $17,$8 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 25 + rotr $22,$7,6 + addu $21,$17,$30 + rotr $23,$7,11 + and $8,$7 + rotr $30,$7,25 + xor $22,$23 + rotr $23,$31,2 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + rotr $30,$31,13 + addu $21,$8 + lw $8,100($6) # K[25] + xor $30,$23 + rotr $23,$31,22 + addu $21,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + addu $21,$17,$30 # 25 + srl $30,$7,6 + xor $8,$24,$25 + sll $23,$7,7 + and $8,$7 + srl $22,$7,11 + xor $30,$23 + sll $23,$7,21 + xor $30,$22 + srl $22,$7,25 + xor $30,$23 + sll $23,$7,26 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + srl $30,$31,2 + addu $21,$8 + lw $8,100($6) # K[25] + sll $23,$31,10 + addu $21,$22 + srl $22,$31,13 + xor $30,$23 + sll $23,$31,19 + xor $30,$22 + srl $22,$31,22 + xor $30,$23 + sll $23,$31,30 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sw $17,36($29) # offload to ring buffer + addu $30,$22 + and $23,$31 + addu $21,$8 # +=K[25] + addu $30,$23 # +=Maj(a,b,c) + addu $3,$21 + addu $30,$21 + lw $20,48($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $8,$19,3 # Xupdate(26) + rotr $22,$19,7 + addu $18,$11 # +=X[i+9] + xor $8,$22 + rotr $22,$19,18 + + srl $9,$16,10 + rotr $23,$16,17 + xor $8,$22 # sigma0(X[i+1]) + rotr $22,$16,19 + xor $9,$23 + addu $18,$8 +#else + srl $8,$19,3 # Xupdate(26) + addu $18,$11 # +=X[i+9] + sll $23,$19,14 + srl $22,$19,7 + xor $8,$23 + sll $23,11 + xor $8,$22 + srl $22,$19,18 + xor $8,$23 + + srl $9,$16,10 + xor $8,$22 # sigma0(X[i+1]) + sll $23,$16,13 + addu $18,$8 + srl $22,$16,17 + xor $9,$23 + sll $23,2 + xor $9,$22 + srl $22,$16,19 + xor $9,$23 +#endif + xor $9,$22 # sigma1(X[i+14]) + addu $18,$9 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 26 + rotr $23,$3,6 + addu $22,$18,$25 + rotr $8,$3,11 + and $9,$3 + rotr $25,$3,25 + xor $23,$8 + rotr $8,$30,2 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + rotr $25,$30,13 + addu $22,$9 + lw $9,104($6) # K[26] + xor $25,$8 + rotr $8,$30,22 + addu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + addu $22,$18,$25 # 26 + srl $25,$3,6 + xor $9,$7,$24 + sll $8,$3,7 + and $9,$3 + srl $23,$3,11 + xor $25,$8 + sll $8,$3,21 + xor $25,$23 + srl $23,$3,25 + xor $25,$8 + sll $8,$3,26 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + srl $25,$30,2 + addu $22,$9 + lw $9,104($6) # K[26] + sll $8,$30,10 + addu $22,$23 + srl $23,$30,13 + xor $25,$8 + sll $8,$30,19 + xor $25,$23 + srl $23,$30,22 + xor $25,$8 + sll $8,$30,30 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sw $18,40($29) # offload to ring buffer + addu $25,$23 + and $8,$30 + addu $22,$9 # +=K[26] + addu $25,$8 # +=Maj(a,b,c) + addu $2,$22 + addu $25,$22 + lw $21,52($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $9,$20,3 # Xupdate(27) + rotr $23,$20,7 + addu $19,$12 # +=X[i+9] + xor $9,$23 + rotr $23,$20,18 + + srl $10,$17,10 + rotr $8,$17,17 + xor $9,$23 # sigma0(X[i+1]) + rotr $23,$17,19 + xor $10,$8 + addu $19,$9 +#else + srl $9,$20,3 # Xupdate(27) + addu $19,$12 # +=X[i+9] + sll $8,$20,14 + srl 
$23,$20,7 + xor $9,$8 + sll $8,11 + xor $9,$23 + srl $23,$20,18 + xor $9,$8 + + srl $10,$17,10 + xor $9,$23 # sigma0(X[i+1]) + sll $8,$17,13 + addu $19,$9 + srl $23,$17,17 + xor $10,$8 + sll $8,2 + xor $10,$23 + srl $23,$17,19 + xor $10,$8 +#endif + xor $10,$23 # sigma1(X[i+14]) + addu $19,$10 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 27 + rotr $8,$2,6 + addu $23,$19,$24 + rotr $9,$2,11 + and $10,$2 + rotr $24,$2,25 + xor $8,$9 + rotr $9,$25,2 + xor $10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + rotr $24,$25,13 + addu $23,$10 + lw $10,108($6) # K[27] + xor $24,$9 + rotr $9,$25,22 + addu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + addu $23,$19,$24 # 27 + srl $24,$2,6 + xor $10,$3,$7 + sll $9,$2,7 + and $10,$2 + srl $8,$2,11 + xor $24,$9 + sll $9,$2,21 + xor $24,$8 + srl $8,$2,25 + xor $24,$9 + sll $9,$2,26 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + srl $24,$25,2 + addu $23,$10 + lw $10,108($6) # K[27] + sll $9,$25,10 + addu $23,$8 + srl $8,$25,13 + xor $24,$9 + sll $9,$25,19 + xor $24,$8 + srl $8,$25,22 + xor $24,$9 + sll $9,$25,30 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sw $19,44($29) # offload to ring buffer + addu $24,$8 + and $9,$25 + addu $23,$10 # +=K[27] + addu $24,$9 # +=Maj(a,b,c) + addu $1,$23 + addu $24,$23 + lw $22,56($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $10,$21,3 # Xupdate(28) + rotr $8,$21,7 + addu $20,$13 # +=X[i+9] + xor $10,$8 + rotr $8,$21,18 + + srl $11,$18,10 + rotr $9,$18,17 + xor $10,$8 # sigma0(X[i+1]) + rotr $8,$18,19 + xor $11,$9 + addu $20,$10 +#else + srl $10,$21,3 # Xupdate(28) + addu $20,$13 # +=X[i+9] + sll $9,$21,14 + srl $8,$21,7 + xor $10,$9 + sll $9,11 + xor $10,$8 + srl $8,$21,18 + xor $10,$9 + + srl $11,$18,10 + xor $10,$8 # sigma0(X[i+1]) + sll $9,$18,13 + addu $20,$10 + srl $8,$18,17 + xor $11,$9 + sll $9,2 + xor $11,$8 + srl $8,$18,19 + xor $11,$9 +#endif + xor $11,$8 # sigma1(X[i+14]) + addu $20,$11 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 28 + rotr $9,$1,6 + addu $8,$20,$7 + rotr $10,$1,11 + and $11,$1 + rotr $7,$1,25 + xor $9,$10 + rotr $10,$24,2 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + rotr $7,$24,13 + addu $8,$11 + lw $11,112($6) # K[28] + xor $7,$10 + rotr $10,$24,22 + addu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + addu $8,$20,$7 # 28 + srl $7,$1,6 + xor $11,$2,$3 + sll $10,$1,7 + and $11,$1 + srl $9,$1,11 + xor $7,$10 + sll $10,$1,21 + xor $7,$9 + srl $9,$1,25 + xor $7,$10 + sll $10,$1,26 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + srl $7,$24,2 + addu $8,$11 + lw $11,112($6) # K[28] + sll $10,$24,10 + addu $8,$9 + srl $9,$24,13 + xor $7,$10 + sll $10,$24,19 + xor $7,$9 + srl $9,$24,22 + xor $7,$10 + sll $10,$24,30 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sw $20,48($29) # offload to ring buffer + addu $7,$9 + and $10,$24 + addu $8,$11 # +=K[28] + addu $7,$10 # +=Maj(a,b,c) + addu $31,$8 + addu $7,$8 + lw $23,60($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $11,$22,3 # Xupdate(29) + rotr $9,$22,7 + addu $21,$14 # +=X[i+9] + xor $11,$9 + rotr $9,$22,18 + + srl $12,$19,10 + rotr $10,$19,17 + xor $11,$9 # sigma0(X[i+1]) + rotr $9,$19,19 + xor $12,$10 + addu $21,$11 +#else + srl $11,$22,3 # Xupdate(29) + addu $21,$14 # +=X[i+9] + sll 
$10,$22,14 + srl $9,$22,7 + xor $11,$10 + sll $10,11 + xor $11,$9 + srl $9,$22,18 + xor $11,$10 + + srl $12,$19,10 + xor $11,$9 # sigma0(X[i+1]) + sll $10,$19,13 + addu $21,$11 + srl $9,$19,17 + xor $12,$10 + sll $10,2 + xor $12,$9 + srl $9,$19,19 + xor $12,$10 +#endif + xor $12,$9 # sigma1(X[i+14]) + addu $21,$12 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 29 + rotr $10,$31,6 + addu $9,$21,$3 + rotr $11,$31,11 + and $12,$31 + rotr $3,$31,25 + xor $10,$11 + rotr $11,$7,2 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + rotr $3,$7,13 + addu $9,$12 + lw $12,116($6) # K[29] + xor $3,$11 + rotr $11,$7,22 + addu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + addu $9,$21,$3 # 29 + srl $3,$31,6 + xor $12,$1,$2 + sll $11,$31,7 + and $12,$31 + srl $10,$31,11 + xor $3,$11 + sll $11,$31,21 + xor $3,$10 + srl $10,$31,25 + xor $3,$11 + sll $11,$31,26 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + srl $3,$7,2 + addu $9,$12 + lw $12,116($6) # K[29] + sll $11,$7,10 + addu $9,$10 + srl $10,$7,13 + xor $3,$11 + sll $11,$7,19 + xor $3,$10 + srl $10,$7,22 + xor $3,$11 + sll $11,$7,30 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sw $21,52($29) # offload to ring buffer + addu $3,$10 + and $11,$7 + addu $9,$12 # +=K[29] + addu $3,$11 # +=Maj(a,b,c) + addu $30,$9 + addu $3,$9 + lw $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $12,$23,3 # Xupdate(30) + rotr $10,$23,7 + addu $22,$15 # +=X[i+9] + xor $12,$10 + rotr $10,$23,18 + + srl $13,$20,10 + rotr $11,$20,17 + xor $12,$10 # sigma0(X[i+1]) + rotr $10,$20,19 + xor $13,$11 + addu $22,$12 +#else + srl $12,$23,3 # Xupdate(30) + addu $22,$15 # +=X[i+9] + sll $11,$23,14 + srl $10,$23,7 + xor $12,$11 + sll $11,11 + xor $12,$10 + srl $10,$23,18 + xor $12,$11 + + srl $13,$20,10 + xor $12,$10 # sigma0(X[i+1]) + sll $11,$20,13 + addu $22,$12 + srl $10,$20,17 + xor $13,$11 + sll $11,2 + xor $13,$10 + srl $10,$20,19 + xor $13,$11 +#endif + xor $13,$10 # sigma1(X[i+14]) + addu $22,$13 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 30 + rotr $11,$30,6 + addu $10,$22,$2 + rotr $12,$30,11 + and $13,$30 + rotr $2,$30,25 + xor $11,$12 + rotr $12,$3,2 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + rotr $2,$3,13 + addu $10,$13 + lw $13,120($6) # K[30] + xor $2,$12 + rotr $12,$3,22 + addu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + addu $10,$22,$2 # 30 + srl $2,$30,6 + xor $13,$31,$1 + sll $12,$30,7 + and $13,$30 + srl $11,$30,11 + xor $2,$12 + sll $12,$30,21 + xor $2,$11 + srl $11,$30,25 + xor $2,$12 + sll $12,$30,26 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + srl $2,$3,2 + addu $10,$13 + lw $13,120($6) # K[30] + sll $12,$3,10 + addu $10,$11 + srl $11,$3,13 + xor $2,$12 + sll $12,$3,19 + xor $2,$11 + srl $11,$3,22 + xor $2,$12 + sll $12,$3,30 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sw $22,56($29) # offload to ring buffer + addu $2,$11 + and $12,$3 + addu $10,$13 # +=K[30] + addu $2,$12 # +=Maj(a,b,c) + addu $25,$10 + addu $2,$10 + lw $9,4($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + srl $13,$8,3 # Xupdate(31) + rotr $11,$8,7 + addu $23,$16 # +=X[i+9] + xor $13,$11 + rotr $11,$8,18 + + srl $14,$21,10 + rotr $12,$21,17 + xor $13,$11 # sigma0(X[i+1]) + rotr $11,$21,19 + xor $14,$12 + addu 
$23,$13 +#else + srl $13,$8,3 # Xupdate(31) + addu $23,$16 # +=X[i+9] + sll $12,$8,14 + srl $11,$8,7 + xor $13,$12 + sll $12,11 + xor $13,$11 + srl $11,$8,18 + xor $13,$12 + + srl $14,$21,10 + xor $13,$11 # sigma0(X[i+1]) + sll $12,$21,13 + addu $23,$13 + srl $11,$21,17 + xor $14,$12 + sll $12,2 + xor $14,$11 + srl $11,$21,19 + xor $14,$12 +#endif + xor $14,$11 # sigma1(X[i+14]) + addu $23,$14
#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 31 + rotr $12,$25,6 + addu $11,$23,$1 + rotr $13,$25,11 + and $14,$25 + rotr $1,$25,25 + xor $12,$13 + rotr $13,$2,2 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + rotr $1,$2,13 + addu $11,$14 + lw $14,124($6) # K[31] + xor $1,$13 + rotr $13,$2,22 + addu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7
#else + addu $11,$23,$1 # 31 + srl $1,$25,6 + xor $14,$30,$31 + sll $13,$25,7 + and $14,$25 + srl $12,$25,11 + xor $1,$13 + sll $13,$25,21 + xor $1,$12 + srl $12,$25,25 + xor $1,$13 + sll $13,$25,26 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + srl $1,$2,2 + addu $11,$14 + lw $14,124($6) # K[31] + sll $13,$2,10 + addu $11,$12 + srl $12,$2,13 + xor $1,$13 + sll $13,$2,19 + xor $1,$12 + srl $12,$2,22 + xor $1,$13 + sll $13,$2,30 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7
#endif + sw $23,60($29) # offload to ring buffer + addu $1,$12 + and $13,$2 + addu $11,$14 # +=K[31] + addu $1,$13 # +=Maj(a,b,c) + addu $24,$11 + addu $1,$11 + lw $10,8($29) # prefetch from ring buffer + and $14,0xfff + li $15,2290 + .set noreorder + bne $14,$15,.L16_xx + daddu $6,16*4 # Ktbl+=16 +
+ ld $23,16*4($29) # restore pointer to the end of input + lw $8,0*4($4) + lw $9,1*4($4) + lw $10,2*4($4) + daddu $5,16*4 + lw $11,3*4($4) + addu $1,$8 + lw $12,4*4($4) + addu $2,$9 + lw $13,5*4($4) + addu $3,$10 + lw $14,6*4($4) + addu $7,$11 + lw $15,7*4($4) + addu $24,$12 + sw $1,0*4($4) + addu $25,$13 + sw $2,1*4($4) + addu $30,$14 + sw $3,2*4($4) + addu $31,$15 + sw $7,3*4($4) + sw $24,4*4($4) + sw $25,5*4($4) + sw $30,6*4($4) + sw $31,7*4($4) +
+ bne $5,$23,.Loop + dsubu $6,192 # rewind $6 +
+ ld $31,192-1*8($29) + ld $30,192-2*8($29) + ld $23,192-3*8($29) + ld $22,192-4*8($29) + ld $21,192-5*8($29) + ld $20,192-6*8($29) + ld $19,192-7*8($29) + ld $18,192-8*8($29) + ld $17,192-9*8($29) + ld $16,192-10*8($29) + jr $31 + daddu $29,192 +.end sha256_block_data_order +
+.rdata +.align 5 +K256: + .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 + .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 + .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 + .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 + .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc + .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da + .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 + .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 + .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 + .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 + .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 + .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 + .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 + .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 + .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 + .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 +.asciiz "SHA256 for MIPS, CRYPTOGAMS by <appro@openssl.org>" +.align 5 +
diff --git a/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/sha/sha512-mips.S b/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/sha/sha512-mips.S
new file mode 100644
index 00000000000000..6318b5a40f9bd0
--- /dev/null
+++ b/deps/openssl/config/archs/linux64-mips64/asm_avx2/crypto/sha/sha512-mips.S
@@ -0,0 +1,3205 @@
+#include "mips_arch.h" + +.text +.set noat +#if !defined(__mips_eabi) && (!defined(__vxworks) || defined(__pic__)) +.option pic2 +#endif +
+.align 5 +.globl sha512_block_data_order +.ent sha512_block_data_order +sha512_block_data_order: + .frame $29,256,$31 + .mask 0xc0ff0000,-8 + .set noreorder + dsubu $29,256 + sd $31,256-1*8($29) + sd $30,256-2*8($29) + sd $23,256-3*8($29) + sd $22,256-4*8($29) + sd $21,256-5*8($29) + sd $20,256-6*8($29) + sd $19,256-7*8($29) + sd $18,256-8*8($29) + sd $17,256-9*8($29) + sd $16,256-10*8($29) + dsll $23,$6,7 + .cplocal $6 + .cpsetup $25,$0,sha512_block_data_order + .set reorder + dla $6,K512 # PIC-ified 'load address' +
+ ld $1,0*8($4) # load context + ld $2,1*8($4) + ld $3,2*8($4) + ld $7,3*8($4) + ld $24,4*8($4) + ld $25,5*8($4) + ld $30,6*8($4) + ld $31,7*8($4) +
+ daddu $23,$5 # pointer to the end of input + sd $23,16*8($29) + b .Loop +
+.align 5 +.Loop: +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $8,($5) +#else + ldl $8,7($5) + ldr $8,0($5) +#endif +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $9,8($5) +#else + ldl $9,15($5) + ldr $9,8($5) +#endif
#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $8,$8 # byte swap(0) + dshd $8,$8 +#else + ori $13,$0,0xFF + dsll $15,$13,32 + or $13,$15 # 0x000000FF000000FF + and $14,$8,$13 # byte swap(0) + dsrl $15,$8,24 + dsll $14,24 + and $15,$13 + dsll $13,8 # 0x0000FF000000FF00 + or $14,$15 + and $15,$8,$13 + dsrl $8,8 + dsll $15,8 + and $8,$13 + or $14,$15 + or $8,$14 + dsrl $14,$8,32 + dsll $8,32 + or $8,$14 +#endif
#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 0 + drotr $13,$24,14 + daddu $12,$8,$31 + drotr $14,$24,18 + and $15,$24 + drotr $31,$24,41 + xor $13,$14 + drotr $14,$1,28 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $12,$15 + ld $15,0($6) # K[0] + xor $31,$14 + drotr $14,$1,39 + daddu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3
#else + daddu $12,$8,$31 # 0 + dsrl $31,$24,14 + xor $15,$25,$30 + dsll $14,$24,23 + and $15,$24 + dsrl $13,$24,18 + xor $31,$14 + dsll $14,$24,46 + xor $31,$13 + dsrl $13,$24,41 + xor $31,$14 + dsll $14,$24,50 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $12,$15 + ld $15,0($6) # K[0] + dsll $14,$1,25 + daddu $12,$13 + dsrl $13,$1,34 + xor $31,$14 + dsll $14,$1,30 + xor $31,$13 + dsrl $13,$1,39 + xor $31,$14 + dsll $14,$1,36 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3
#endif + sd $8,0($29) # offload to ring buffer + daddu $31,$13 + and $14,$1 + daddu $12,$15 # +=K[0] + daddu $31,$14 # +=Maj(a,b,c) + daddu $7,$12 + daddu $31,$12 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $10,16($5) +#else + ldl $10,23($5) + ldr $10,16($5) +#endif
#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $9,$9 # byte swap(1) + dshd $9,$9 +#else + ori $14,$0,0xFF + dsll $16,$14,32 + or $14,$16 # 0x000000FF000000FF + and $15,$9,$14 # byte swap(1) + dsrl $16,$9,24 + dsll $15,24 + and $16,$14 + dsll $14,8 # 0x0000FF000000FF00 + or $15,$16 + and $16,$9,$14 + dsrl $9,8 + dsll $16,8 + and $9,$14 + or $15,$16 + or $9,$15 + dsrl $15,$9,32 + dsll $9,32 + or $9,$15 +#endif
#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 1 + drotr $14,$7,14 + daddu $13,$9,$30 + drotr 
$15,$7,18 + and $16,$7 + drotr $30,$7,41 + xor $14,$15 + drotr $15,$31,28 + xor $16,$25 # Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $13,$16 + ld $16,8($6) # K[1] + xor $30,$15 + drotr $15,$31,39 + daddu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + daddu $13,$9,$30 # 1 + dsrl $30,$7,14 + xor $16,$24,$25 + dsll $15,$7,23 + and $16,$7 + dsrl $14,$7,18 + xor $30,$15 + dsll $15,$7,46 + xor $30,$14 + dsrl $14,$7,41 + xor $30,$15 + dsll $15,$7,50 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $13,$16 + ld $16,8($6) # K[1] + dsll $15,$31,25 + daddu $13,$14 + dsrl $14,$31,34 + xor $30,$15 + dsll $15,$31,30 + xor $30,$14 + dsrl $14,$31,39 + xor $30,$15 + dsll $15,$31,36 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sd $9,8($29) # offload to ring buffer + daddu $30,$14 + and $15,$31 + daddu $13,$16 # +=K[1] + daddu $30,$15 # +=Maj(a,b,c) + daddu $3,$13 + daddu $30,$13 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $11,24($5) +#else + ldl $11,31($5) + ldr $11,24($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $10,$10 # byte swap(2) + dshd $10,$10 +#else + ori $15,$0,0xFF + dsll $17,$15,32 + or $15,$17 # 0x000000FF000000FF + and $16,$10,$15 # byte swap(2) + dsrl $17,$10,24 + dsll $16,24 + and $17,$15 + dsll $15,8 # 0x0000FF000000FF00 + or $16,$17 + and $17,$10,$15 + dsrl $10,8 + dsll $17,8 + and $10,$15 + or $16,$17 + or $10,$16 + dsrl $16,$10,32 + dsll $10,32 + or $10,$16 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 2 + drotr $15,$3,14 + daddu $14,$10,$25 + drotr $16,$3,18 + and $17,$3 + drotr $25,$3,41 + xor $15,$16 + drotr $16,$30,28 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $14,$17 + ld $17,16($6) # K[2] + xor $25,$16 + drotr $16,$30,39 + daddu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + daddu $14,$10,$25 # 2 + dsrl $25,$3,14 + xor $17,$7,$24 + dsll $16,$3,23 + and $17,$3 + dsrl $15,$3,18 + xor $25,$16 + dsll $16,$3,46 + xor $25,$15 + dsrl $15,$3,41 + xor $25,$16 + dsll $16,$3,50 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $14,$17 + ld $17,16($6) # K[2] + dsll $16,$30,25 + daddu $14,$15 + dsrl $15,$30,34 + xor $25,$16 + dsll $16,$30,30 + xor $25,$15 + dsrl $15,$30,39 + xor $25,$16 + dsll $16,$30,36 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sd $10,16($29) # offload to ring buffer + daddu $25,$15 + and $16,$30 + daddu $14,$17 # +=K[2] + daddu $25,$16 # +=Maj(a,b,c) + daddu $2,$14 + daddu $25,$14 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $12,32($5) +#else + ldl $12,39($5) + ldr $12,32($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $11,$11 # byte swap(3) + dshd $11,$11 +#else + ori $16,$0,0xFF + dsll $18,$16,32 + or $16,$18 # 0x000000FF000000FF + and $17,$11,$16 # byte swap(3) + dsrl $18,$11,24 + dsll $17,24 + and $18,$16 + dsll $16,8 # 0x0000FF000000FF00 + or $17,$18 + and $18,$11,$16 + dsrl $11,8 + dsll $18,8 + and $11,$16 + or $17,$18 + or $11,$17 + dsrl $17,$11,32 + dsll $11,32 + or $11,$17 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 3 + drotr $16,$2,14 + daddu $15,$11,$24 + drotr $17,$2,18 + and $18,$2 + drotr $24,$2,41 + xor $16,$17 + drotr $17,$25,28 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu $15,$18 
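+	# Annotation (editorial; not emitted by the CRYPTOGAMS generator): each round
+	# block in this file is the FIPS 180-4 SHA-512 step
+	#   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
+	# with Sigma1(e) = ROTR(e,14) ^ ROTR(e,18) ^ ROTR(e,41),
+	#      Sigma0(a) = ROTR(a,28) ^ ROTR(a,34) ^ ROTR(a,39),
+	#      Ch(e,f,g) = (e & f) ^ (~e & g), Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c),
+	# followed by d += T1 and a = T1 + Sigma0(a) + Maj(a,b,c).  The R2 path uses
+	# drotr directly; the pre-R2 path synthesizes each 64-bit rotate from a
+	# dsrl/dsll pair, which is why it needs roughly twice as many instructions.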
+ ld $18,24($6) # K[3] + xor $24,$17 + drotr $17,$25,39 + daddu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + daddu $15,$11,$24 # 3 + dsrl $24,$2,14 + xor $18,$3,$7 + dsll $17,$2,23 + and $18,$2 + dsrl $16,$2,18 + xor $24,$17 + dsll $17,$2,46 + xor $24,$16 + dsrl $16,$2,41 + xor $24,$17 + dsll $17,$2,50 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $15,$18 + ld $18,24($6) # K[3] + dsll $17,$25,25 + daddu $15,$16 + dsrl $16,$25,34 + xor $24,$17 + dsll $17,$25,30 + xor $24,$16 + dsrl $16,$25,39 + xor $24,$17 + dsll $17,$25,36 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sd $11,24($29) # offload to ring buffer + daddu $24,$16 + and $17,$25 + daddu $15,$18 # +=K[3] + daddu $24,$17 # +=Maj(a,b,c) + daddu $1,$15 + daddu $24,$15 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $13,40($5) +#else + ldl $13,47($5) + ldr $13,40($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $12,$12 # byte swap(4) + dshd $12,$12 +#else + ori $17,$0,0xFF + dsll $19,$17,32 + or $17,$19 # 0x000000FF000000FF + and $18,$12,$17 # byte swap(4) + dsrl $19,$12,24 + dsll $18,24 + and $19,$17 + dsll $17,8 # 0x0000FF000000FF00 + or $18,$19 + and $19,$12,$17 + dsrl $12,8 + dsll $19,8 + and $12,$17 + or $18,$19 + or $12,$18 + dsrl $18,$12,32 + dsll $12,32 + or $12,$18 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 4 + drotr $17,$1,14 + daddu $16,$12,$7 + drotr $18,$1,18 + and $19,$1 + drotr $7,$1,41 + xor $17,$18 + drotr $18,$24,28 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $16,$19 + ld $19,32($6) # K[4] + xor $7,$18 + drotr $18,$24,39 + daddu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + daddu $16,$12,$7 # 4 + dsrl $7,$1,14 + xor $19,$2,$3 + dsll $18,$1,23 + and $19,$1 + dsrl $17,$1,18 + xor $7,$18 + dsll $18,$1,46 + xor $7,$17 + dsrl $17,$1,41 + xor $7,$18 + dsll $18,$1,50 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $16,$19 + ld $19,32($6) # K[4] + dsll $18,$24,25 + daddu $16,$17 + dsrl $17,$24,34 + xor $7,$18 + dsll $18,$24,30 + xor $7,$17 + dsrl $17,$24,39 + xor $7,$18 + dsll $18,$24,36 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sd $12,32($29) # offload to ring buffer + daddu $7,$17 + and $18,$24 + daddu $16,$19 # +=K[4] + daddu $7,$18 # +=Maj(a,b,c) + daddu $31,$16 + daddu $7,$16 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $14,48($5) +#else + ldl $14,55($5) + ldr $14,48($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $13,$13 # byte swap(5) + dshd $13,$13 +#else + ori $18,$0,0xFF + dsll $20,$18,32 + or $18,$20 # 0x000000FF000000FF + and $19,$13,$18 # byte swap(5) + dsrl $20,$13,24 + dsll $19,24 + and $20,$18 + dsll $18,8 # 0x0000FF000000FF00 + or $19,$20 + and $20,$13,$18 + dsrl $13,8 + dsll $20,8 + and $13,$18 + or $19,$20 + or $13,$19 + dsrl $19,$13,32 + dsll $13,32 + or $13,$19 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 5 + drotr $18,$31,14 + daddu $17,$13,$3 + drotr $19,$31,18 + and $20,$31 + drotr $3,$31,41 + xor $18,$19 + drotr $19,$7,28 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $17,$20 + ld $20,40($6) # K[5] + xor $3,$19 + drotr $19,$7,39 + daddu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + daddu $17,$13,$3 # 5 + dsrl 
$3,$31,14 + xor $20,$1,$2 + dsll $19,$31,23 + and $20,$31 + dsrl $18,$31,18 + xor $3,$19 + dsll $19,$31,46 + xor $3,$18 + dsrl $18,$31,41 + xor $3,$19 + dsll $19,$31,50 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $17,$20 + ld $20,40($6) # K[5] + dsll $19,$7,25 + daddu $17,$18 + dsrl $18,$7,34 + xor $3,$19 + dsll $19,$7,30 + xor $3,$18 + dsrl $18,$7,39 + xor $3,$19 + dsll $19,$7,36 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sd $13,40($29) # offload to ring buffer + daddu $3,$18 + and $19,$7 + daddu $17,$20 # +=K[5] + daddu $3,$19 # +=Maj(a,b,c) + daddu $30,$17 + daddu $3,$17 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $15,56($5) +#else + ldl $15,63($5) + ldr $15,56($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $14,$14 # byte swap(6) + dshd $14,$14 +#else + ori $19,$0,0xFF + dsll $21,$19,32 + or $19,$21 # 0x000000FF000000FF + and $20,$14,$19 # byte swap(6) + dsrl $21,$14,24 + dsll $20,24 + and $21,$19 + dsll $19,8 # 0x0000FF000000FF00 + or $20,$21 + and $21,$14,$19 + dsrl $14,8 + dsll $21,8 + and $14,$19 + or $20,$21 + or $14,$20 + dsrl $20,$14,32 + dsll $14,32 + or $14,$20 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 6 + drotr $19,$30,14 + daddu $18,$14,$2 + drotr $20,$30,18 + and $21,$30 + drotr $2,$30,41 + xor $19,$20 + drotr $20,$3,28 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $18,$21 + ld $21,48($6) # K[6] + xor $2,$20 + drotr $20,$3,39 + daddu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + daddu $18,$14,$2 # 6 + dsrl $2,$30,14 + xor $21,$31,$1 + dsll $20,$30,23 + and $21,$30 + dsrl $19,$30,18 + xor $2,$20 + dsll $20,$30,46 + xor $2,$19 + dsrl $19,$30,41 + xor $2,$20 + dsll $20,$30,50 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $18,$21 + ld $21,48($6) # K[6] + dsll $20,$3,25 + daddu $18,$19 + dsrl $19,$3,34 + xor $2,$20 + dsll $20,$3,30 + xor $2,$19 + dsrl $19,$3,39 + xor $2,$20 + dsll $20,$3,36 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sd $14,48($29) # offload to ring buffer + daddu $2,$19 + and $20,$3 + daddu $18,$21 # +=K[6] + daddu $2,$20 # +=Maj(a,b,c) + daddu $25,$18 + daddu $2,$18 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $16,64($5) +#else + ldl $16,71($5) + ldr $16,64($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $15,$15 # byte swap(7) + dshd $15,$15 +#else + ori $20,$0,0xFF + dsll $22,$20,32 + or $20,$22 # 0x000000FF000000FF + and $21,$15,$20 # byte swap(7) + dsrl $22,$15,24 + dsll $21,24 + and $22,$20 + dsll $20,8 # 0x0000FF000000FF00 + or $21,$22 + and $22,$15,$20 + dsrl $15,8 + dsll $22,8 + and $15,$20 + or $21,$22 + or $15,$21 + dsrl $21,$15,32 + dsll $15,32 + or $15,$21 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 7 + drotr $20,$25,14 + daddu $19,$15,$1 + drotr $21,$25,18 + and $22,$25 + drotr $1,$25,41 + xor $20,$21 + drotr $21,$2,28 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $19,$22 + ld $22,56($6) # K[7] + xor $1,$21 + drotr $21,$2,39 + daddu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + daddu $19,$15,$1 # 7 + dsrl $1,$25,14 + xor $22,$30,$31 + dsll $21,$25,23 + and $22,$25 + dsrl $20,$25,18 + xor $1,$21 + dsll $21,$25,46 + xor $1,$20 + dsrl $20,$25,41 + xor $1,$21 + dsll $21,$25,50 + xor $1,$20 + xor 
$22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $19,$22 + ld $22,56($6) # K[7] + dsll $21,$2,25 + daddu $19,$20 + dsrl $20,$2,34 + xor $1,$21 + dsll $21,$2,30 + xor $1,$20 + dsrl $20,$2,39 + xor $1,$21 + dsll $21,$2,36 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sd $15,56($29) # offload to ring buffer + daddu $1,$20 + and $21,$2 + daddu $19,$22 # +=K[7] + daddu $1,$21 # +=Maj(a,b,c) + daddu $24,$19 + daddu $1,$19 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $17,72($5) +#else + ldl $17,79($5) + ldr $17,72($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $16,$16 # byte swap(8) + dshd $16,$16 +#else + ori $21,$0,0xFF + dsll $23,$21,32 + or $21,$23 # 0x000000FF000000FF + and $22,$16,$21 # byte swap(8) + dsrl $23,$16,24 + dsll $22,24 + and $23,$21 + dsll $21,8 # 0x0000FF000000FF00 + or $22,$23 + and $23,$16,$21 + dsrl $16,8 + dsll $23,8 + and $16,$21 + or $22,$23 + or $16,$22 + dsrl $22,$16,32 + dsll $16,32 + or $16,$22 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 8 + drotr $21,$24,14 + daddu $20,$16,$31 + drotr $22,$24,18 + and $23,$24 + drotr $31,$24,41 + xor $21,$22 + drotr $22,$1,28 + xor $23,$30 # Ch(e,f,g) + xor $21,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $20,$23 + ld $23,64($6) # K[8] + xor $31,$22 + drotr $22,$1,39 + daddu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + daddu $20,$16,$31 # 8 + dsrl $31,$24,14 + xor $23,$25,$30 + dsll $22,$24,23 + and $23,$24 + dsrl $21,$24,18 + xor $31,$22 + dsll $22,$24,46 + xor $31,$21 + dsrl $21,$24,41 + xor $31,$22 + dsll $22,$24,50 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $20,$23 + ld $23,64($6) # K[8] + dsll $22,$1,25 + daddu $20,$21 + dsrl $21,$1,34 + xor $31,$22 + dsll $22,$1,30 + xor $31,$21 + dsrl $21,$1,39 + xor $31,$22 + dsll $22,$1,36 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sd $16,64($29) # offload to ring buffer + daddu $31,$21 + and $22,$1 + daddu $20,$23 # +=K[8] + daddu $31,$22 # +=Maj(a,b,c) + daddu $7,$20 + daddu $31,$20 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $18,80($5) +#else + ldl $18,87($5) + ldr $18,80($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $17,$17 # byte swap(9) + dshd $17,$17 +#else + ori $22,$0,0xFF + dsll $8,$22,32 + or $22,$8 # 0x000000FF000000FF + and $23,$17,$22 # byte swap(9) + dsrl $8,$17,24 + dsll $23,24 + and $8,$22 + dsll $22,8 # 0x0000FF000000FF00 + or $23,$8 + and $8,$17,$22 + dsrl $17,8 + dsll $8,8 + and $17,$22 + or $23,$8 + or $17,$23 + dsrl $23,$17,32 + dsll $17,32 + or $17,$23 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 9 + drotr $22,$7,14 + daddu $21,$17,$30 + drotr $23,$7,18 + and $8,$7 + drotr $30,$7,41 + xor $22,$23 + drotr $23,$31,28 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $21,$8 + ld $8,72($6) # K[9] + xor $30,$23 + drotr $23,$31,39 + daddu $21,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + daddu $21,$17,$30 # 9 + dsrl $30,$7,14 + xor $8,$24,$25 + dsll $23,$7,23 + and $8,$7 + dsrl $22,$7,18 + xor $30,$23 + dsll $23,$7,46 + xor $30,$22 + dsrl $22,$7,41 + xor $30,$23 + dsll $23,$7,50 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $21,$8 + ld $8,72($6) # K[9] + dsll $23,$31,25 + daddu $21,$22 + dsrl $22,$31,34 + xor $30,$23 + dsll 
$23,$31,30 + xor $30,$22 + dsrl $22,$31,39 + xor $30,$23 + dsll $23,$31,36 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sd $17,72($29) # offload to ring buffer + daddu $30,$22 + and $23,$31 + daddu $21,$8 # +=K[9] + daddu $30,$23 # +=Maj(a,b,c) + daddu $3,$21 + daddu $30,$21 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $19,88($5) +#else + ldl $19,95($5) + ldr $19,88($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $18,$18 # byte swap(10) + dshd $18,$18 +#else + ori $23,$0,0xFF + dsll $9,$23,32 + or $23,$9 # 0x000000FF000000FF + and $8,$18,$23 # byte swap(10) + dsrl $9,$18,24 + dsll $8,24 + and $9,$23 + dsll $23,8 # 0x0000FF000000FF00 + or $8,$9 + and $9,$18,$23 + dsrl $18,8 + dsll $9,8 + and $18,$23 + or $8,$9 + or $18,$8 + dsrl $8,$18,32 + dsll $18,32 + or $18,$8 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 10 + drotr $23,$3,14 + daddu $22,$18,$25 + drotr $8,$3,18 + and $9,$3 + drotr $25,$3,41 + xor $23,$8 + drotr $8,$30,28 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $22,$9 + ld $9,80($6) # K[10] + xor $25,$8 + drotr $8,$30,39 + daddu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + daddu $22,$18,$25 # 10 + dsrl $25,$3,14 + xor $9,$7,$24 + dsll $8,$3,23 + and $9,$3 + dsrl $23,$3,18 + xor $25,$8 + dsll $8,$3,46 + xor $25,$23 + dsrl $23,$3,41 + xor $25,$8 + dsll $8,$3,50 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $22,$9 + ld $9,80($6) # K[10] + dsll $8,$30,25 + daddu $22,$23 + dsrl $23,$30,34 + xor $25,$8 + dsll $8,$30,30 + xor $25,$23 + dsrl $23,$30,39 + xor $25,$8 + dsll $8,$30,36 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sd $18,80($29) # offload to ring buffer + daddu $25,$23 + and $8,$30 + daddu $22,$9 # +=K[10] + daddu $25,$8 # +=Maj(a,b,c) + daddu $2,$22 + daddu $25,$22 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $20,96($5) +#else + ldl $20,103($5) + ldr $20,96($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $19,$19 # byte swap(11) + dshd $19,$19 +#else + ori $8,$0,0xFF + dsll $10,$8,32 + or $8,$10 # 0x000000FF000000FF + and $9,$19,$8 # byte swap(11) + dsrl $10,$19,24 + dsll $9,24 + and $10,$8 + dsll $8,8 # 0x0000FF000000FF00 + or $9,$10 + and $10,$19,$8 + dsrl $19,8 + dsll $10,8 + and $19,$8 + or $9,$10 + or $19,$9 + dsrl $9,$19,32 + dsll $19,32 + or $19,$9 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 11 + drotr $8,$2,14 + daddu $23,$19,$24 + drotr $9,$2,18 + and $10,$2 + drotr $24,$2,41 + xor $8,$9 + drotr $9,$25,28 + xor $10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu $23,$10 + ld $10,88($6) # K[11] + xor $24,$9 + drotr $9,$25,39 + daddu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + daddu $23,$19,$24 # 11 + dsrl $24,$2,14 + xor $10,$3,$7 + dsll $9,$2,23 + and $10,$2 + dsrl $8,$2,18 + xor $24,$9 + dsll $9,$2,46 + xor $24,$8 + dsrl $8,$2,41 + xor $24,$9 + dsll $9,$2,50 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $23,$10 + ld $10,88($6) # K[11] + dsll $9,$25,25 + daddu $23,$8 + dsrl $8,$25,34 + xor $24,$9 + dsll $9,$25,30 + xor $24,$8 + dsrl $8,$25,39 + xor $24,$9 + dsll $9,$25,36 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sd $19,88($29) # offload to ring buffer + daddu $24,$8 + and $9,$25 + daddu 
$23,$10 # +=K[11] + daddu $24,$9 # +=Maj(a,b,c) + daddu $1,$23 + daddu $24,$23 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $21,104($5) +#else + ldl $21,111($5) + ldr $21,104($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $20,$20 # byte swap(12) + dshd $20,$20 +#else + ori $9,$0,0xFF + dsll $11,$9,32 + or $9,$11 # 0x000000FF000000FF + and $10,$20,$9 # byte swap(12) + dsrl $11,$20,24 + dsll $10,24 + and $11,$9 + dsll $9,8 # 0x0000FF000000FF00 + or $10,$11 + and $11,$20,$9 + dsrl $20,8 + dsll $11,8 + and $20,$9 + or $10,$11 + or $20,$10 + dsrl $10,$20,32 + dsll $20,32 + or $20,$10 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 12 + drotr $9,$1,14 + daddu $8,$20,$7 + drotr $10,$1,18 + and $11,$1 + drotr $7,$1,41 + xor $9,$10 + drotr $10,$24,28 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $8,$11 + ld $11,96($6) # K[12] + xor $7,$10 + drotr $10,$24,39 + daddu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + daddu $8,$20,$7 # 12 + dsrl $7,$1,14 + xor $11,$2,$3 + dsll $10,$1,23 + and $11,$1 + dsrl $9,$1,18 + xor $7,$10 + dsll $10,$1,46 + xor $7,$9 + dsrl $9,$1,41 + xor $7,$10 + dsll $10,$1,50 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $8,$11 + ld $11,96($6) # K[12] + dsll $10,$24,25 + daddu $8,$9 + dsrl $9,$24,34 + xor $7,$10 + dsll $10,$24,30 + xor $7,$9 + dsrl $9,$24,39 + xor $7,$10 + dsll $10,$24,36 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sd $20,96($29) # offload to ring buffer + daddu $7,$9 + and $10,$24 + daddu $8,$11 # +=K[12] + daddu $7,$10 # +=Maj(a,b,c) + daddu $31,$8 + daddu $7,$8 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $22,112($5) +#else + ldl $22,119($5) + ldr $22,112($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $21,$21 # byte swap(13) + dshd $21,$21 +#else + ori $10,$0,0xFF + dsll $12,$10,32 + or $10,$12 # 0x000000FF000000FF + and $11,$21,$10 # byte swap(13) + dsrl $12,$21,24 + dsll $11,24 + and $12,$10 + dsll $10,8 # 0x0000FF000000FF00 + or $11,$12 + and $12,$21,$10 + dsrl $21,8 + dsll $12,8 + and $21,$10 + or $11,$12 + or $21,$11 + dsrl $11,$21,32 + dsll $21,32 + or $21,$11 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 13 + drotr $10,$31,14 + daddu $9,$21,$3 + drotr $11,$31,18 + and $12,$31 + drotr $3,$31,41 + xor $10,$11 + drotr $11,$7,28 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $9,$12 + ld $12,104($6) # K[13] + xor $3,$11 + drotr $11,$7,39 + daddu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + daddu $9,$21,$3 # 13 + dsrl $3,$31,14 + xor $12,$1,$2 + dsll $11,$31,23 + and $12,$31 + dsrl $10,$31,18 + xor $3,$11 + dsll $11,$31,46 + xor $3,$10 + dsrl $10,$31,41 + xor $3,$11 + dsll $11,$31,50 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $9,$12 + ld $12,104($6) # K[13] + dsll $11,$7,25 + daddu $9,$10 + dsrl $10,$7,34 + xor $3,$11 + dsll $11,$7,30 + xor $3,$10 + dsrl $10,$7,39 + xor $3,$11 + dsll $11,$7,36 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sd $21,104($29) # offload to ring buffer + daddu $3,$10 + and $11,$7 + daddu $9,$12 # +=K[13] + daddu $3,$11 # +=Maj(a,b,c) + daddu $30,$9 + daddu $3,$9 + ld $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + ld $23,120($5) +#else + 
ldl $23,127($5) + ldr $23,120($5) +#endif +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $22,$22 # byte swap(14) + dshd $22,$22 +#else + ori $11,$0,0xFF + dsll $13,$11,32 + or $11,$13 # 0x000000FF000000FF + and $12,$22,$11 # byte swap(14) + dsrl $13,$22,24 + dsll $12,24 + and $13,$11 + dsll $11,8 # 0x0000FF000000FF00 + or $12,$13 + and $13,$22,$11 + dsrl $22,8 + dsll $13,8 + and $22,$11 + or $12,$13 + or $22,$12 + dsrl $12,$22,32 + dsll $22,32 + or $22,$12 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 14 + drotr $11,$30,14 + daddu $10,$22,$2 + drotr $12,$30,18 + and $13,$30 + drotr $2,$30,41 + xor $11,$12 + drotr $12,$3,28 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $10,$13 + ld $13,112($6) # K[14] + xor $2,$12 + drotr $12,$3,39 + daddu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + daddu $10,$22,$2 # 14 + dsrl $2,$30,14 + xor $13,$31,$1 + dsll $12,$30,23 + and $13,$30 + dsrl $11,$30,18 + xor $2,$12 + dsll $12,$30,46 + xor $2,$11 + dsrl $11,$30,41 + xor $2,$12 + dsll $12,$30,50 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $10,$13 + ld $13,112($6) # K[14] + dsll $12,$3,25 + daddu $10,$11 + dsrl $11,$3,34 + xor $2,$12 + dsll $12,$3,30 + xor $2,$11 + dsrl $11,$3,39 + xor $2,$12 + dsll $12,$3,36 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sd $22,112($29) # offload to ring buffer + daddu $2,$11 + and $12,$3 + daddu $10,$13 # +=K[14] + daddu $2,$12 # +=Maj(a,b,c) + daddu $25,$10 + daddu $2,$10 + ld $9,8($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS64R2) + dsbh $23,$23 # byte swap(15) + dshd $23,$23 +#else + ori $12,$0,0xFF + dsll $14,$12,32 + or $12,$14 # 0x000000FF000000FF + and $13,$23,$12 # byte swap(15) + dsrl $14,$23,24 + dsll $13,24 + and $14,$12 + dsll $12,8 # 0x0000FF000000FF00 + or $13,$14 + and $14,$23,$12 + dsrl $23,8 + dsll $14,8 + and $23,$12 + or $13,$14 + or $23,$13 + dsrl $13,$23,32 + dsll $23,32 + or $23,$13 +#endif +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 15 + drotr $12,$25,14 + daddu $11,$23,$1 + drotr $13,$25,18 + and $14,$25 + drotr $1,$25,41 + xor $12,$13 + drotr $13,$2,28 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $11,$14 + ld $14,120($6) # K[15] + xor $1,$13 + drotr $13,$2,39 + daddu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#else + daddu $11,$23,$1 # 15 + dsrl $1,$25,14 + xor $14,$30,$31 + dsll $13,$25,23 + and $14,$25 + dsrl $12,$25,18 + xor $1,$13 + dsll $13,$25,46 + xor $1,$12 + dsrl $12,$25,41 + xor $1,$13 + dsll $13,$25,50 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $11,$14 + ld $14,120($6) # K[15] + dsll $13,$2,25 + daddu $11,$12 + dsrl $12,$2,34 + xor $1,$13 + dsll $13,$2,30 + xor $1,$12 + dsrl $12,$2,39 + xor $1,$13 + dsll $13,$2,36 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#endif + sd $23,120($29) # offload to ring buffer + daddu $1,$12 + and $13,$2 + daddu $11,$14 # +=K[15] + daddu $1,$13 # +=Maj(a,b,c) + daddu $24,$11 + daddu $1,$11 + ld $10,16($29) # prefetch from ring buffer + b .L16_xx +.align 4 +.L16_xx: +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $14,$9,7 # Xupdate(16) + drotr $12,$9,1 + daddu $8,$17 # +=X[i+9] + xor $14,$12 + drotr $12,$9,8 + + dsrl $15,$22,6 + drotr $13,$22,19 + xor $14,$12 # sigma0(X[i+1]) + drotr $12,$22,61 + xor $15,$13 + 
daddu $8,$14 +#else + dsrl $14,$9,7 # Xupdate(16) + daddu $8,$17 # +=X[i+9] + dsll $13,$9,56 + dsrl $12,$9,1 + xor $14,$13 + dsll $13,7 + xor $14,$12 + dsrl $12,$9,8 + xor $14,$13 + + dsrl $15,$22,6 + xor $14,$12 # sigma0(X[i+1]) + dsll $13,$22,3 + daddu $8,$14 + dsrl $12,$22,19 + xor $15,$13 + dsll $13,42 + xor $15,$12 + dsrl $12,$22,61 + xor $15,$13 +#endif + xor $15,$12 # sigma1(X[i+14]) + daddu $8,$15 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $15,$25,$30 # 16 + drotr $13,$24,14 + daddu $12,$8,$31 + drotr $14,$24,18 + and $15,$24 + drotr $31,$24,41 + xor $13,$14 + drotr $14,$1,28 + xor $15,$30 # Ch(e,f,g) + xor $13,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $12,$15 + ld $15,128($6) # K[16] + xor $31,$14 + drotr $14,$1,39 + daddu $12,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#else + daddu $12,$8,$31 # 16 + dsrl $31,$24,14 + xor $15,$25,$30 + dsll $14,$24,23 + and $15,$24 + dsrl $13,$24,18 + xor $31,$14 + dsll $14,$24,46 + xor $31,$13 + dsrl $13,$24,41 + xor $31,$14 + dsll $14,$24,50 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $12,$15 + ld $15,128($6) # K[16] + dsll $14,$1,25 + daddu $12,$13 + dsrl $13,$1,34 + xor $31,$14 + dsll $14,$1,30 + xor $31,$13 + dsrl $13,$1,39 + xor $31,$14 + dsll $14,$1,36 + xor $31,$13 + and $13,$2,$3 + xor $31,$14 # Sigma0(a) + xor $14,$2,$3 +#endif + sd $8,0($29) # offload to ring buffer + daddu $31,$13 + and $14,$1 + daddu $12,$15 # +=K[16] + daddu $31,$14 # +=Maj(a,b,c) + daddu $7,$12 + daddu $31,$12 + ld $11,24($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $15,$10,7 # Xupdate(17) + drotr $13,$10,1 + daddu $9,$18 # +=X[i+9] + xor $15,$13 + drotr $13,$10,8 + + dsrl $16,$23,6 + drotr $14,$23,19 + xor $15,$13 # sigma0(X[i+1]) + drotr $13,$23,61 + xor $16,$14 + daddu $9,$15 +#else + dsrl $15,$10,7 # Xupdate(17) + daddu $9,$18 # +=X[i+9] + dsll $14,$10,56 + dsrl $13,$10,1 + xor $15,$14 + dsll $14,7 + xor $15,$13 + dsrl $13,$10,8 + xor $15,$14 + + dsrl $16,$23,6 + xor $15,$13 # sigma0(X[i+1]) + dsll $14,$23,3 + daddu $9,$15 + dsrl $13,$23,19 + xor $16,$14 + dsll $14,42 + xor $16,$13 + dsrl $13,$23,61 + xor $16,$14 +#endif + xor $16,$13 # sigma1(X[i+14]) + daddu $9,$16 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $16,$24,$25 # 17 + drotr $14,$7,14 + daddu $13,$9,$30 + drotr $15,$7,18 + and $16,$7 + drotr $30,$7,41 + xor $14,$15 + drotr $15,$31,28 + xor $16,$25 # Ch(e,f,g) + xor $14,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $13,$16 + ld $16,136($6) # K[17] + xor $30,$15 + drotr $15,$31,39 + daddu $13,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#else + daddu $13,$9,$30 # 17 + dsrl $30,$7,14 + xor $16,$24,$25 + dsll $15,$7,23 + and $16,$7 + dsrl $14,$7,18 + xor $30,$15 + dsll $15,$7,46 + xor $30,$14 + dsrl $14,$7,41 + xor $30,$15 + dsll $15,$7,50 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $13,$16 + ld $16,136($6) # K[17] + dsll $15,$31,25 + daddu $13,$14 + dsrl $14,$31,34 + xor $30,$15 + dsll $15,$31,30 + xor $30,$14 + dsrl $14,$31,39 + xor $30,$15 + dsll $15,$31,36 + xor $30,$14 + and $14,$1,$2 + xor $30,$15 # Sigma0(a) + xor $15,$1,$2 +#endif + sd $9,8($29) # offload to ring buffer + daddu $30,$14 + and $15,$31 + daddu $13,$16 # +=K[17] + daddu $30,$15 # +=Maj(a,b,c) + daddu $3,$13 + daddu $30,$13 + ld $12,32($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || 
defined(_MIPS_ARCH_MIPS64R2) + dsrl $16,$11,7 # Xupdate(18) + drotr $14,$11,1 + daddu $10,$19 # +=X[i+9] + xor $16,$14 + drotr $14,$11,8 + + dsrl $17,$8,6 + drotr $15,$8,19 + xor $16,$14 # sigma0(X[i+1]) + drotr $14,$8,61 + xor $17,$15 + daddu $10,$16 +#else + dsrl $16,$11,7 # Xupdate(18) + daddu $10,$19 # +=X[i+9] + dsll $15,$11,56 + dsrl $14,$11,1 + xor $16,$15 + dsll $15,7 + xor $16,$14 + dsrl $14,$11,8 + xor $16,$15 + + dsrl $17,$8,6 + xor $16,$14 # sigma0(X[i+1]) + dsll $15,$8,3 + daddu $10,$16 + dsrl $14,$8,19 + xor $17,$15 + dsll $15,42 + xor $17,$14 + dsrl $14,$8,61 + xor $17,$15 +#endif + xor $17,$14 # sigma1(X[i+14]) + daddu $10,$17 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $17,$7,$24 # 18 + drotr $15,$3,14 + daddu $14,$10,$25 + drotr $16,$3,18 + and $17,$3 + drotr $25,$3,41 + xor $15,$16 + drotr $16,$30,28 + xor $17,$24 # Ch(e,f,g) + xor $15,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $14,$17 + ld $17,144($6) # K[18] + xor $25,$16 + drotr $16,$30,39 + daddu $14,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#else + daddu $14,$10,$25 # 18 + dsrl $25,$3,14 + xor $17,$7,$24 + dsll $16,$3,23 + and $17,$3 + dsrl $15,$3,18 + xor $25,$16 + dsll $16,$3,46 + xor $25,$15 + dsrl $15,$3,41 + xor $25,$16 + dsll $16,$3,50 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $14,$17 + ld $17,144($6) # K[18] + dsll $16,$30,25 + daddu $14,$15 + dsrl $15,$30,34 + xor $25,$16 + dsll $16,$30,30 + xor $25,$15 + dsrl $15,$30,39 + xor $25,$16 + dsll $16,$30,36 + xor $25,$15 + and $15,$31,$1 + xor $25,$16 # Sigma0(a) + xor $16,$31,$1 +#endif + sd $10,16($29) # offload to ring buffer + daddu $25,$15 + and $16,$30 + daddu $14,$17 # +=K[18] + daddu $25,$16 # +=Maj(a,b,c) + daddu $2,$14 + daddu $25,$14 + ld $13,40($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $17,$12,7 # Xupdate(19) + drotr $15,$12,1 + daddu $11,$20 # +=X[i+9] + xor $17,$15 + drotr $15,$12,8 + + dsrl $18,$9,6 + drotr $16,$9,19 + xor $17,$15 # sigma0(X[i+1]) + drotr $15,$9,61 + xor $18,$16 + daddu $11,$17 +#else + dsrl $17,$12,7 # Xupdate(19) + daddu $11,$20 # +=X[i+9] + dsll $16,$12,56 + dsrl $15,$12,1 + xor $17,$16 + dsll $16,7 + xor $17,$15 + dsrl $15,$12,8 + xor $17,$16 + + dsrl $18,$9,6 + xor $17,$15 # sigma0(X[i+1]) + dsll $16,$9,3 + daddu $11,$17 + dsrl $15,$9,19 + xor $18,$16 + dsll $16,42 + xor $18,$15 + dsrl $15,$9,61 + xor $18,$16 +#endif + xor $18,$15 # sigma1(X[i+14]) + daddu $11,$18 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $18,$3,$7 # 19 + drotr $16,$2,14 + daddu $15,$11,$24 + drotr $17,$2,18 + and $18,$2 + drotr $24,$2,41 + xor $16,$17 + drotr $17,$25,28 + xor $18,$7 # Ch(e,f,g) + xor $16,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu $15,$18 + ld $18,152($6) # K[19] + xor $24,$17 + drotr $17,$25,39 + daddu $15,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#else + daddu $15,$11,$24 # 19 + dsrl $24,$2,14 + xor $18,$3,$7 + dsll $17,$2,23 + and $18,$2 + dsrl $16,$2,18 + xor $24,$17 + dsll $17,$2,46 + xor $24,$16 + dsrl $16,$2,41 + xor $24,$17 + dsll $17,$2,50 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $15,$18 + ld $18,152($6) # K[19] + dsll $17,$25,25 + daddu $15,$16 + dsrl $16,$25,34 + xor $24,$17 + dsll $17,$25,30 + xor $24,$16 + dsrl $16,$25,39 + xor $24,$17 + dsll $17,$25,36 + xor $24,$16 + and $16,$30,$31 + xor $24,$17 # Sigma0(a) + xor $17,$30,$31 +#endif + sd 
$11,24($29) # offload to ring buffer + daddu $24,$16 + and $17,$25 + daddu $15,$18 # +=K[19] + daddu $24,$17 # +=Maj(a,b,c) + daddu $1,$15 + daddu $24,$15 + ld $14,48($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $18,$13,7 # Xupdate(20) + drotr $16,$13,1 + daddu $12,$21 # +=X[i+9] + xor $18,$16 + drotr $16,$13,8 + + dsrl $19,$10,6 + drotr $17,$10,19 + xor $18,$16 # sigma0(X[i+1]) + drotr $16,$10,61 + xor $19,$17 + daddu $12,$18 +#else + dsrl $18,$13,7 # Xupdate(20) + daddu $12,$21 # +=X[i+9] + dsll $17,$13,56 + dsrl $16,$13,1 + xor $18,$17 + dsll $17,7 + xor $18,$16 + dsrl $16,$13,8 + xor $18,$17 + + dsrl $19,$10,6 + xor $18,$16 # sigma0(X[i+1]) + dsll $17,$10,3 + daddu $12,$18 + dsrl $16,$10,19 + xor $19,$17 + dsll $17,42 + xor $19,$16 + dsrl $16,$10,61 + xor $19,$17 +#endif + xor $19,$16 # sigma1(X[i+14]) + daddu $12,$19 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $19,$2,$3 # 20 + drotr $17,$1,14 + daddu $16,$12,$7 + drotr $18,$1,18 + and $19,$1 + drotr $7,$1,41 + xor $17,$18 + drotr $18,$24,28 + xor $19,$3 # Ch(e,f,g) + xor $17,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $16,$19 + ld $19,160($6) # K[20] + xor $7,$18 + drotr $18,$24,39 + daddu $16,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#else + daddu $16,$12,$7 # 20 + dsrl $7,$1,14 + xor $19,$2,$3 + dsll $18,$1,23 + and $19,$1 + dsrl $17,$1,18 + xor $7,$18 + dsll $18,$1,46 + xor $7,$17 + dsrl $17,$1,41 + xor $7,$18 + dsll $18,$1,50 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $16,$19 + ld $19,160($6) # K[20] + dsll $18,$24,25 + daddu $16,$17 + dsrl $17,$24,34 + xor $7,$18 + dsll $18,$24,30 + xor $7,$17 + dsrl $17,$24,39 + xor $7,$18 + dsll $18,$24,36 + xor $7,$17 + and $17,$25,$30 + xor $7,$18 # Sigma0(a) + xor $18,$25,$30 +#endif + sd $12,32($29) # offload to ring buffer + daddu $7,$17 + and $18,$24 + daddu $16,$19 # +=K[20] + daddu $7,$18 # +=Maj(a,b,c) + daddu $31,$16 + daddu $7,$16 + ld $15,56($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $19,$14,7 # Xupdate(21) + drotr $17,$14,1 + daddu $13,$22 # +=X[i+9] + xor $19,$17 + drotr $17,$14,8 + + dsrl $20,$11,6 + drotr $18,$11,19 + xor $19,$17 # sigma0(X[i+1]) + drotr $17,$11,61 + xor $20,$18 + daddu $13,$19 +#else + dsrl $19,$14,7 # Xupdate(21) + daddu $13,$22 # +=X[i+9] + dsll $18,$14,56 + dsrl $17,$14,1 + xor $19,$18 + dsll $18,7 + xor $19,$17 + dsrl $17,$14,8 + xor $19,$18 + + dsrl $20,$11,6 + xor $19,$17 # sigma0(X[i+1]) + dsll $18,$11,3 + daddu $13,$19 + dsrl $17,$11,19 + xor $20,$18 + dsll $18,42 + xor $20,$17 + dsrl $17,$11,61 + xor $20,$18 +#endif + xor $20,$17 # sigma1(X[i+14]) + daddu $13,$20 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $20,$1,$2 # 21 + drotr $18,$31,14 + daddu $17,$13,$3 + drotr $19,$31,18 + and $20,$31 + drotr $3,$31,41 + xor $18,$19 + drotr $19,$7,28 + xor $20,$2 # Ch(e,f,g) + xor $18,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $17,$20 + ld $20,168($6) # K[21] + xor $3,$19 + drotr $19,$7,39 + daddu $17,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#else + daddu $17,$13,$3 # 21 + dsrl $3,$31,14 + xor $20,$1,$2 + dsll $19,$31,23 + and $20,$31 + dsrl $18,$31,18 + xor $3,$19 + dsll $19,$31,46 + xor $3,$18 + dsrl $18,$31,41 + xor $3,$19 + dsll $19,$31,50 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $17,$20 + ld $20,168($6) # K[21] + dsll $19,$7,25 + 
daddu $17,$18 + dsrl $18,$7,34 + xor $3,$19 + dsll $19,$7,30 + xor $3,$18 + dsrl $18,$7,39 + xor $3,$19 + dsll $19,$7,36 + xor $3,$18 + and $18,$24,$25 + xor $3,$19 # Sigma0(a) + xor $19,$24,$25 +#endif + sd $13,40($29) # offload to ring buffer + daddu $3,$18 + and $19,$7 + daddu $17,$20 # +=K[21] + daddu $3,$19 # +=Maj(a,b,c) + daddu $30,$17 + daddu $3,$17 + ld $16,64($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $20,$15,7 # Xupdate(22) + drotr $18,$15,1 + daddu $14,$23 # +=X[i+9] + xor $20,$18 + drotr $18,$15,8 + + dsrl $21,$12,6 + drotr $19,$12,19 + xor $20,$18 # sigma0(X[i+1]) + drotr $18,$12,61 + xor $21,$19 + daddu $14,$20 +#else + dsrl $20,$15,7 # Xupdate(22) + daddu $14,$23 # +=X[i+9] + dsll $19,$15,56 + dsrl $18,$15,1 + xor $20,$19 + dsll $19,7 + xor $20,$18 + dsrl $18,$15,8 + xor $20,$19 + + dsrl $21,$12,6 + xor $20,$18 # sigma0(X[i+1]) + dsll $19,$12,3 + daddu $14,$20 + dsrl $18,$12,19 + xor $21,$19 + dsll $19,42 + xor $21,$18 + dsrl $18,$12,61 + xor $21,$19 +#endif + xor $21,$18 # sigma1(X[i+14]) + daddu $14,$21 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $21,$31,$1 # 22 + drotr $19,$30,14 + daddu $18,$14,$2 + drotr $20,$30,18 + and $21,$30 + drotr $2,$30,41 + xor $19,$20 + drotr $20,$3,28 + xor $21,$1 # Ch(e,f,g) + xor $19,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $18,$21 + ld $21,176($6) # K[22] + xor $2,$20 + drotr $20,$3,39 + daddu $18,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#else + daddu $18,$14,$2 # 22 + dsrl $2,$30,14 + xor $21,$31,$1 + dsll $20,$30,23 + and $21,$30 + dsrl $19,$30,18 + xor $2,$20 + dsll $20,$30,46 + xor $2,$19 + dsrl $19,$30,41 + xor $2,$20 + dsll $20,$30,50 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $18,$21 + ld $21,176($6) # K[22] + dsll $20,$3,25 + daddu $18,$19 + dsrl $19,$3,34 + xor $2,$20 + dsll $20,$3,30 + xor $2,$19 + dsrl $19,$3,39 + xor $2,$20 + dsll $20,$3,36 + xor $2,$19 + and $19,$7,$24 + xor $2,$20 # Sigma0(a) + xor $20,$7,$24 +#endif + sd $14,48($29) # offload to ring buffer + daddu $2,$19 + and $20,$3 + daddu $18,$21 # +=K[22] + daddu $2,$20 # +=Maj(a,b,c) + daddu $25,$18 + daddu $2,$18 + ld $17,72($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $21,$16,7 # Xupdate(23) + drotr $19,$16,1 + daddu $15,$8 # +=X[i+9] + xor $21,$19 + drotr $19,$16,8 + + dsrl $22,$13,6 + drotr $20,$13,19 + xor $21,$19 # sigma0(X[i+1]) + drotr $19,$13,61 + xor $22,$20 + daddu $15,$21 +#else + dsrl $21,$16,7 # Xupdate(23) + daddu $15,$8 # +=X[i+9] + dsll $20,$16,56 + dsrl $19,$16,1 + xor $21,$20 + dsll $20,7 + xor $21,$19 + dsrl $19,$16,8 + xor $21,$20 + + dsrl $22,$13,6 + xor $21,$19 # sigma0(X[i+1]) + dsll $20,$13,3 + daddu $15,$21 + dsrl $19,$13,19 + xor $22,$20 + dsll $20,42 + xor $22,$19 + dsrl $19,$13,61 + xor $22,$20 +#endif + xor $22,$19 # sigma1(X[i+14]) + daddu $15,$22 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $22,$30,$31 # 23 + drotr $20,$25,14 + daddu $19,$15,$1 + drotr $21,$25,18 + and $22,$25 + drotr $1,$25,41 + xor $20,$21 + drotr $21,$2,28 + xor $22,$31 # Ch(e,f,g) + xor $20,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $19,$22 + ld $22,184($6) # K[23] + xor $1,$21 + drotr $21,$2,39 + daddu $19,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#else + daddu $19,$15,$1 # 23 + dsrl $1,$25,14 + xor $22,$30,$31 + dsll $21,$25,23 + and $22,$25 + dsrl $20,$25,18 + xor $1,$21 + dsll $21,$25,46 + xor 
$1,$20 + dsrl $20,$25,41 + xor $1,$21 + dsll $21,$25,50 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $19,$22 + ld $22,184($6) # K[23] + dsll $21,$2,25 + daddu $19,$20 + dsrl $20,$2,34 + xor $1,$21 + dsll $21,$2,30 + xor $1,$20 + dsrl $20,$2,39 + xor $1,$21 + dsll $21,$2,36 + xor $1,$20 + and $20,$3,$7 + xor $1,$21 # Sigma0(a) + xor $21,$3,$7 +#endif + sd $15,56($29) # offload to ring buffer + daddu $1,$20 + and $21,$2 + daddu $19,$22 # +=K[23] + daddu $1,$21 # +=Maj(a,b,c) + daddu $24,$19 + daddu $1,$19 + ld $18,80($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $22,$17,7 # Xupdate(24) + drotr $20,$17,1 + daddu $16,$9 # +=X[i+9] + xor $22,$20 + drotr $20,$17,8 + + dsrl $23,$14,6 + drotr $21,$14,19 + xor $22,$20 # sigma0(X[i+1]) + drotr $20,$14,61 + xor $23,$21 + daddu $16,$22 +#else + dsrl $22,$17,7 # Xupdate(24) + daddu $16,$9 # +=X[i+9] + dsll $21,$17,56 + dsrl $20,$17,1 + xor $22,$21 + dsll $21,7 + xor $22,$20 + dsrl $20,$17,8 + xor $22,$21 + + dsrl $23,$14,6 + xor $22,$20 # sigma0(X[i+1]) + dsll $21,$14,3 + daddu $16,$22 + dsrl $20,$14,19 + xor $23,$21 + dsll $21,42 + xor $23,$20 + dsrl $20,$14,61 + xor $23,$21 +#endif + xor $23,$20 # sigma1(X[i+14]) + daddu $16,$23 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $23,$25,$30 # 24 + drotr $21,$24,14 + daddu $20,$16,$31 + drotr $22,$24,18 + and $23,$24 + drotr $31,$24,41 + xor $21,$22 + drotr $22,$1,28 + xor $23,$30 # Ch(e,f,g) + xor $21,$31 # Sigma1(e) + + drotr $31,$1,34 + daddu $20,$23 + ld $23,192($6) # K[24] + xor $31,$22 + drotr $22,$1,39 + daddu $20,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#else + daddu $20,$16,$31 # 24 + dsrl $31,$24,14 + xor $23,$25,$30 + dsll $22,$24,23 + and $23,$24 + dsrl $21,$24,18 + xor $31,$22 + dsll $22,$24,46 + xor $31,$21 + dsrl $21,$24,41 + xor $31,$22 + dsll $22,$24,50 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + dsrl $31,$1,28 + daddu $20,$23 + ld $23,192($6) # K[24] + dsll $22,$1,25 + daddu $20,$21 + dsrl $21,$1,34 + xor $31,$22 + dsll $22,$1,30 + xor $31,$21 + dsrl $21,$1,39 + xor $31,$22 + dsll $22,$1,36 + xor $31,$21 + and $21,$2,$3 + xor $31,$22 # Sigma0(a) + xor $22,$2,$3 +#endif + sd $16,64($29) # offload to ring buffer + daddu $31,$21 + and $22,$1 + daddu $20,$23 # +=K[24] + daddu $31,$22 # +=Maj(a,b,c) + daddu $7,$20 + daddu $31,$20 + ld $19,88($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $23,$18,7 # Xupdate(25) + drotr $21,$18,1 + daddu $17,$10 # +=X[i+9] + xor $23,$21 + drotr $21,$18,8 + + dsrl $8,$15,6 + drotr $22,$15,19 + xor $23,$21 # sigma0(X[i+1]) + drotr $21,$15,61 + xor $8,$22 + daddu $17,$23 +#else + dsrl $23,$18,7 # Xupdate(25) + daddu $17,$10 # +=X[i+9] + dsll $22,$18,56 + dsrl $21,$18,1 + xor $23,$22 + dsll $22,7 + xor $23,$21 + dsrl $21,$18,8 + xor $23,$22 + + dsrl $8,$15,6 + xor $23,$21 # sigma0(X[i+1]) + dsll $22,$15,3 + daddu $17,$23 + dsrl $21,$15,19 + xor $8,$22 + dsll $22,42 + xor $8,$21 + dsrl $21,$15,61 + xor $8,$22 +#endif + xor $8,$21 # sigma1(X[i+14]) + daddu $17,$8 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $8,$24,$25 # 25 + drotr $22,$7,14 + daddu $21,$17,$30 + drotr $23,$7,18 + and $8,$7 + drotr $30,$7,41 + xor $22,$23 + drotr $23,$31,28 + xor $8,$25 # Ch(e,f,g) + xor $22,$30 # Sigma1(e) + + drotr $30,$31,34 + daddu $21,$8 + ld $8,200($6) # K[25] + xor $30,$23 + drotr $23,$31,39 + daddu $21,$22 + 
and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#else + daddu $21,$17,$30 # 25 + dsrl $30,$7,14 + xor $8,$24,$25 + dsll $23,$7,23 + and $8,$7 + dsrl $22,$7,18 + xor $30,$23 + dsll $23,$7,46 + xor $30,$22 + dsrl $22,$7,41 + xor $30,$23 + dsll $23,$7,50 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + dsrl $30,$31,28 + daddu $21,$8 + ld $8,200($6) # K[25] + dsll $23,$31,25 + daddu $21,$22 + dsrl $22,$31,34 + xor $30,$23 + dsll $23,$31,30 + xor $30,$22 + dsrl $22,$31,39 + xor $30,$23 + dsll $23,$31,36 + xor $30,$22 + and $22,$1,$2 + xor $30,$23 # Sigma0(a) + xor $23,$1,$2 +#endif + sd $17,72($29) # offload to ring buffer + daddu $30,$22 + and $23,$31 + daddu $21,$8 # +=K[25] + daddu $30,$23 # +=Maj(a,b,c) + daddu $3,$21 + daddu $30,$21 + ld $20,96($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $8,$19,7 # Xupdate(26) + drotr $22,$19,1 + daddu $18,$11 # +=X[i+9] + xor $8,$22 + drotr $22,$19,8 + + dsrl $9,$16,6 + drotr $23,$16,19 + xor $8,$22 # sigma0(X[i+1]) + drotr $22,$16,61 + xor $9,$23 + daddu $18,$8 +#else + dsrl $8,$19,7 # Xupdate(26) + daddu $18,$11 # +=X[i+9] + dsll $23,$19,56 + dsrl $22,$19,1 + xor $8,$23 + dsll $23,7 + xor $8,$22 + dsrl $22,$19,8 + xor $8,$23 + + dsrl $9,$16,6 + xor $8,$22 # sigma0(X[i+1]) + dsll $23,$16,3 + daddu $18,$8 + dsrl $22,$16,19 + xor $9,$23 + dsll $23,42 + xor $9,$22 + dsrl $22,$16,61 + xor $9,$23 +#endif + xor $9,$22 # sigma1(X[i+14]) + daddu $18,$9 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $9,$7,$24 # 26 + drotr $23,$3,14 + daddu $22,$18,$25 + drotr $8,$3,18 + and $9,$3 + drotr $25,$3,41 + xor $23,$8 + drotr $8,$30,28 + xor $9,$24 # Ch(e,f,g) + xor $23,$25 # Sigma1(e) + + drotr $25,$30,34 + daddu $22,$9 + ld $9,208($6) # K[26] + xor $25,$8 + drotr $8,$30,39 + daddu $22,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#else + daddu $22,$18,$25 # 26 + dsrl $25,$3,14 + xor $9,$7,$24 + dsll $8,$3,23 + and $9,$3 + dsrl $23,$3,18 + xor $25,$8 + dsll $8,$3,46 + xor $25,$23 + dsrl $23,$3,41 + xor $25,$8 + dsll $8,$3,50 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + dsrl $25,$30,28 + daddu $22,$9 + ld $9,208($6) # K[26] + dsll $8,$30,25 + daddu $22,$23 + dsrl $23,$30,34 + xor $25,$8 + dsll $8,$30,30 + xor $25,$23 + dsrl $23,$30,39 + xor $25,$8 + dsll $8,$30,36 + xor $25,$23 + and $23,$31,$1 + xor $25,$8 # Sigma0(a) + xor $8,$31,$1 +#endif + sd $18,80($29) # offload to ring buffer + daddu $25,$23 + and $8,$30 + daddu $22,$9 # +=K[26] + daddu $25,$8 # +=Maj(a,b,c) + daddu $2,$22 + daddu $25,$22 + ld $21,104($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $9,$20,7 # Xupdate(27) + drotr $23,$20,1 + daddu $19,$12 # +=X[i+9] + xor $9,$23 + drotr $23,$20,8 + + dsrl $10,$17,6 + drotr $8,$17,19 + xor $9,$23 # sigma0(X[i+1]) + drotr $23,$17,61 + xor $10,$8 + daddu $19,$9 +#else + dsrl $9,$20,7 # Xupdate(27) + daddu $19,$12 # +=X[i+9] + dsll $8,$20,56 + dsrl $23,$20,1 + xor $9,$8 + dsll $8,7 + xor $9,$23 + dsrl $23,$20,8 + xor $9,$8 + + dsrl $10,$17,6 + xor $9,$23 # sigma0(X[i+1]) + dsll $8,$17,3 + daddu $19,$9 + dsrl $23,$17,19 + xor $10,$8 + dsll $8,42 + xor $10,$23 + dsrl $23,$17,61 + xor $10,$8 +#endif + xor $10,$23 # sigma1(X[i+14]) + daddu $19,$10 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $10,$3,$7 # 27 + drotr $8,$2,14 + daddu $23,$19,$24 + drotr $9,$2,18 + and $10,$2 + drotr $24,$2,41 + xor $8,$9 + drotr $9,$25,28 + xor 
$10,$7 # Ch(e,f,g) + xor $8,$24 # Sigma1(e) + + drotr $24,$25,34 + daddu $23,$10 + ld $10,216($6) # K[27] + xor $24,$9 + drotr $9,$25,39 + daddu $23,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#else + daddu $23,$19,$24 # 27 + dsrl $24,$2,14 + xor $10,$3,$7 + dsll $9,$2,23 + and $10,$2 + dsrl $8,$2,18 + xor $24,$9 + dsll $9,$2,46 + xor $24,$8 + dsrl $8,$2,41 + xor $24,$9 + dsll $9,$2,50 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + dsrl $24,$25,28 + daddu $23,$10 + ld $10,216($6) # K[27] + dsll $9,$25,25 + daddu $23,$8 + dsrl $8,$25,34 + xor $24,$9 + dsll $9,$25,30 + xor $24,$8 + dsrl $8,$25,39 + xor $24,$9 + dsll $9,$25,36 + xor $24,$8 + and $8,$30,$31 + xor $24,$9 # Sigma0(a) + xor $9,$30,$31 +#endif + sd $19,88($29) # offload to ring buffer + daddu $24,$8 + and $9,$25 + daddu $23,$10 # +=K[27] + daddu $24,$9 # +=Maj(a,b,c) + daddu $1,$23 + daddu $24,$23 + ld $22,112($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $10,$21,7 # Xupdate(28) + drotr $8,$21,1 + daddu $20,$13 # +=X[i+9] + xor $10,$8 + drotr $8,$21,8 + + dsrl $11,$18,6 + drotr $9,$18,19 + xor $10,$8 # sigma0(X[i+1]) + drotr $8,$18,61 + xor $11,$9 + daddu $20,$10 +#else + dsrl $10,$21,7 # Xupdate(28) + daddu $20,$13 # +=X[i+9] + dsll $9,$21,56 + dsrl $8,$21,1 + xor $10,$9 + dsll $9,7 + xor $10,$8 + dsrl $8,$21,8 + xor $10,$9 + + dsrl $11,$18,6 + xor $10,$8 # sigma0(X[i+1]) + dsll $9,$18,3 + daddu $20,$10 + dsrl $8,$18,19 + xor $11,$9 + dsll $9,42 + xor $11,$8 + dsrl $8,$18,61 + xor $11,$9 +#endif + xor $11,$8 # sigma1(X[i+14]) + daddu $20,$11 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $11,$2,$3 # 28 + drotr $9,$1,14 + daddu $8,$20,$7 + drotr $10,$1,18 + and $11,$1 + drotr $7,$1,41 + xor $9,$10 + drotr $10,$24,28 + xor $11,$3 # Ch(e,f,g) + xor $9,$7 # Sigma1(e) + + drotr $7,$24,34 + daddu $8,$11 + ld $11,224($6) # K[28] + xor $7,$10 + drotr $10,$24,39 + daddu $8,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#else + daddu $8,$20,$7 # 28 + dsrl $7,$1,14 + xor $11,$2,$3 + dsll $10,$1,23 + and $11,$1 + dsrl $9,$1,18 + xor $7,$10 + dsll $10,$1,46 + xor $7,$9 + dsrl $9,$1,41 + xor $7,$10 + dsll $10,$1,50 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + dsrl $7,$24,28 + daddu $8,$11 + ld $11,224($6) # K[28] + dsll $10,$24,25 + daddu $8,$9 + dsrl $9,$24,34 + xor $7,$10 + dsll $10,$24,30 + xor $7,$9 + dsrl $9,$24,39 + xor $7,$10 + dsll $10,$24,36 + xor $7,$9 + and $9,$25,$30 + xor $7,$10 # Sigma0(a) + xor $10,$25,$30 +#endif + sd $20,96($29) # offload to ring buffer + daddu $7,$9 + and $10,$24 + daddu $8,$11 # +=K[28] + daddu $7,$10 # +=Maj(a,b,c) + daddu $31,$8 + daddu $7,$8 + ld $23,120($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $11,$22,7 # Xupdate(29) + drotr $9,$22,1 + daddu $21,$14 # +=X[i+9] + xor $11,$9 + drotr $9,$22,8 + + dsrl $12,$19,6 + drotr $10,$19,19 + xor $11,$9 # sigma0(X[i+1]) + drotr $9,$19,61 + xor $12,$10 + daddu $21,$11 +#else + dsrl $11,$22,7 # Xupdate(29) + daddu $21,$14 # +=X[i+9] + dsll $10,$22,56 + dsrl $9,$22,1 + xor $11,$10 + dsll $10,7 + xor $11,$9 + dsrl $9,$22,8 + xor $11,$10 + + dsrl $12,$19,6 + xor $11,$9 # sigma0(X[i+1]) + dsll $10,$19,3 + daddu $21,$11 + dsrl $9,$19,19 + xor $12,$10 + dsll $10,42 + xor $12,$9 + dsrl $9,$19,61 + xor $12,$10 +#endif + xor $12,$9 # sigma1(X[i+14]) + daddu $21,$12 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $12,$1,$2 # 29 
+ drotr $10,$31,14 + daddu $9,$21,$3 + drotr $11,$31,18 + and $12,$31 + drotr $3,$31,41 + xor $10,$11 + drotr $11,$7,28 + xor $12,$2 # Ch(e,f,g) + xor $10,$3 # Sigma1(e) + + drotr $3,$7,34 + daddu $9,$12 + ld $12,232($6) # K[29] + xor $3,$11 + drotr $11,$7,39 + daddu $9,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#else + daddu $9,$21,$3 # 29 + dsrl $3,$31,14 + xor $12,$1,$2 + dsll $11,$31,23 + and $12,$31 + dsrl $10,$31,18 + xor $3,$11 + dsll $11,$31,46 + xor $3,$10 + dsrl $10,$31,41 + xor $3,$11 + dsll $11,$31,50 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + dsrl $3,$7,28 + daddu $9,$12 + ld $12,232($6) # K[29] + dsll $11,$7,25 + daddu $9,$10 + dsrl $10,$7,34 + xor $3,$11 + dsll $11,$7,30 + xor $3,$10 + dsrl $10,$7,39 + xor $3,$11 + dsll $11,$7,36 + xor $3,$10 + and $10,$24,$25 + xor $3,$11 # Sigma0(a) + xor $11,$24,$25 +#endif + sd $21,104($29) # offload to ring buffer + daddu $3,$10 + and $11,$7 + daddu $9,$12 # +=K[29] + daddu $3,$11 # +=Maj(a,b,c) + daddu $30,$9 + daddu $3,$9 + ld $8,0($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $12,$23,7 # Xupdate(30) + drotr $10,$23,1 + daddu $22,$15 # +=X[i+9] + xor $12,$10 + drotr $10,$23,8 + + dsrl $13,$20,6 + drotr $11,$20,19 + xor $12,$10 # sigma0(X[i+1]) + drotr $10,$20,61 + xor $13,$11 + daddu $22,$12 +#else + dsrl $12,$23,7 # Xupdate(30) + daddu $22,$15 # +=X[i+9] + dsll $11,$23,56 + dsrl $10,$23,1 + xor $12,$11 + dsll $11,7 + xor $12,$10 + dsrl $10,$23,8 + xor $12,$11 + + dsrl $13,$20,6 + xor $12,$10 # sigma0(X[i+1]) + dsll $11,$20,3 + daddu $22,$12 + dsrl $10,$20,19 + xor $13,$11 + dsll $11,42 + xor $13,$10 + dsrl $10,$20,61 + xor $13,$11 +#endif + xor $13,$10 # sigma1(X[i+14]) + daddu $22,$13 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $13,$31,$1 # 30 + drotr $11,$30,14 + daddu $10,$22,$2 + drotr $12,$30,18 + and $13,$30 + drotr $2,$30,41 + xor $11,$12 + drotr $12,$3,28 + xor $13,$1 # Ch(e,f,g) + xor $11,$2 # Sigma1(e) + + drotr $2,$3,34 + daddu $10,$13 + ld $13,240($6) # K[30] + xor $2,$12 + drotr $12,$3,39 + daddu $10,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#else + daddu $10,$22,$2 # 30 + dsrl $2,$30,14 + xor $13,$31,$1 + dsll $12,$30,23 + and $13,$30 + dsrl $11,$30,18 + xor $2,$12 + dsll $12,$30,46 + xor $2,$11 + dsrl $11,$30,41 + xor $2,$12 + dsll $12,$30,50 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + dsrl $2,$3,28 + daddu $10,$13 + ld $13,240($6) # K[30] + dsll $12,$3,25 + daddu $10,$11 + dsrl $11,$3,34 + xor $2,$12 + dsll $12,$3,30 + xor $2,$11 + dsrl $11,$3,39 + xor $2,$12 + dsll $12,$3,36 + xor $2,$11 + and $11,$7,$24 + xor $2,$12 # Sigma0(a) + xor $12,$7,$24 +#endif + sd $22,112($29) # offload to ring buffer + daddu $2,$11 + and $12,$3 + daddu $10,$13 # +=K[30] + daddu $2,$12 # +=Maj(a,b,c) + daddu $25,$10 + daddu $2,$10 + ld $9,8($29) # prefetch from ring buffer +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + dsrl $13,$8,7 # Xupdate(31) + drotr $11,$8,1 + daddu $23,$16 # +=X[i+9] + xor $13,$11 + drotr $11,$8,8 + + dsrl $14,$21,6 + drotr $12,$21,19 + xor $13,$11 # sigma0(X[i+1]) + drotr $11,$21,61 + xor $14,$12 + daddu $23,$13 +#else + dsrl $13,$8,7 # Xupdate(31) + daddu $23,$16 # +=X[i+9] + dsll $12,$8,56 + dsrl $11,$8,1 + xor $13,$12 + dsll $12,7 + xor $13,$11 + dsrl $11,$8,8 + xor $13,$12 + + dsrl $14,$21,6 + xor $13,$11 # sigma0(X[i+1]) + dsll $12,$21,3 + daddu $23,$13 + dsrl $11,$21,19 + xor $14,$12 + dsll $12,42 + xor 
$14,$11 + dsrl $11,$21,61 + xor $14,$12 +#endif + xor $14,$11 # sigma1(X[i+14]) + daddu $23,$14 +#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2) + xor $14,$30,$31 # 31 + drotr $12,$25,14 + daddu $11,$23,$1 + drotr $13,$25,18 + and $14,$25 + drotr $1,$25,41 + xor $12,$13 + drotr $13,$2,28 + xor $14,$31 # Ch(e,f,g) + xor $12,$1 # Sigma1(e) + + drotr $1,$2,34 + daddu $11,$14 + ld $14,248($6) # K[31] + xor $1,$13 + drotr $13,$2,39 + daddu $11,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#else + daddu $11,$23,$1 # 31 + dsrl $1,$25,14 + xor $14,$30,$31 + dsll $13,$25,23 + and $14,$25 + dsrl $12,$25,18 + xor $1,$13 + dsll $13,$25,46 + xor $1,$12 + dsrl $12,$25,41 + xor $1,$13 + dsll $13,$25,50 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + dsrl $1,$2,28 + daddu $11,$14 + ld $14,248($6) # K[31] + dsll $13,$2,25 + daddu $11,$12 + dsrl $12,$2,34 + xor $1,$13 + dsll $13,$2,30 + xor $1,$12 + dsrl $12,$2,39 + xor $1,$13 + dsll $13,$2,36 + xor $1,$12 + and $12,$3,$7 + xor $1,$13 # Sigma0(a) + xor $13,$3,$7 +#endif + sd $23,120($29) # offload to ring buffer + daddu $1,$12 + and $13,$2 + daddu $11,$14 # +=K[31] + daddu $1,$13 # +=Maj(a,b,c) + daddu $24,$11 + daddu $1,$11 + ld $10,16($29) # prefetch from ring buffer + and $14,0xfff + li $15,2071 + .set noreorder + bne $14,$15,.L16_xx + daddu $6,16*8 # Ktbl+=16 + + ld $23,16*8($29) # restore pointer to the end of input + ld $8,0*8($4) + ld $9,1*8($4) + ld $10,2*8($4) + daddu $5,16*8 + ld $11,3*8($4) + daddu $1,$8 + ld $12,4*8($4) + daddu $2,$9 + ld $13,5*8($4) + daddu $3,$10 + ld $14,6*8($4) + daddu $7,$11 + ld $15,7*8($4) + daddu $24,$12 + sd $1,0*8($4) + daddu $25,$13 + sd $2,1*8($4) + daddu $30,$14 + sd $3,2*8($4) + daddu $31,$15 + sd $7,3*8($4) + sd $24,4*8($4) + sd $25,5*8($4) + sd $30,6*8($4) + sd $31,7*8($4) + + bne $5,$23,.Loop + dsubu $6,512 # rewind $6 + + ld $31,256-1*8($29) + ld $30,256-2*8($29) + ld $23,256-3*8($29) + ld $22,256-4*8($29) + ld $21,256-5*8($29) + ld $20,256-6*8($29) + ld $19,256-7*8($29) + ld $18,256-8*8($29) + ld $17,256-9*8($29) + ld $16,256-10*8($29) + jr $31 + daddu $29,256 +.end sha512_block_data_order + +.rdata +.align 5 +K512: + .dword 0x428a2f98d728ae22, 0x7137449123ef65cd + .dword 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc + .dword 0x3956c25bf348b538, 0x59f111f1b605d019 + .dword 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 + .dword 0xd807aa98a3030242, 0x12835b0145706fbe + .dword 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 + .dword 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 + .dword 0x9bdc06a725c71235, 0xc19bf174cf692694 + .dword 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 + .dword 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 + .dword 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 + .dword 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 + .dword 0x983e5152ee66dfab, 0xa831c66d2db43210 + .dword 0xb00327c898fb213f, 0xbf597fc7beef0ee4 + .dword 0xc6e00bf33da88fc2, 0xd5a79147930aa725 + .dword 0x06ca6351e003826f, 0x142929670a0e6e70 + .dword 0x27b70a8546d22ffc, 0x2e1b21385c26c926 + .dword 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df + .dword 0x650a73548baf63de, 0x766a0abb3c77b2a8 + .dword 0x81c2c92e47edaee6, 0x92722c851482353b + .dword 0xa2bfe8a14cf10364, 0xa81a664bbc423001 + .dword 0xc24b8b70d0f89791, 0xc76c51a30654be30 + .dword 0xd192e819d6ef5218, 0xd69906245565a910 + .dword 0xf40e35855771202a, 0x106aa07032bbd1b8 + .dword 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 + .dword 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 + .dword 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb + .dword 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 + .dword 
0x748f82ee5defb2fc, 0x78a5636f43172f60 + .dword 0x84c87814a1f0ab72, 0x8cc702081a6439ec + .dword 0x90befffa23631e28, 0xa4506cebde82bde9 + .dword 0xbef9a3f7b2c67915, 0xc67178f2e372532b + .dword 0xca273eceea26619c, 0xd186b8c721c0c207 + .dword 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 + .dword 0x06f067aa72176fba, 0x0a637dc5a2c898a6 + .dword 0x113f9804bef90dae, 0x1b710b35131c471b + .dword 0x28db77f523047d84, 0x32caab7b40c72493 + .dword 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c + .dword 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a + .dword 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 +.asciiz "SHA512 for MIPS, CRYPTOGAMS by <appro@openssl.org>" +.align 5 + diff --git a/deps/openssl/config/archs/linux64-mips64/no-asm/configdata.pm b/deps/openssl/config/archs/linux64-mips64/no-asm/configdata.pm index 62586346a8d849..11dae65985f330 100644 --- a/deps/openssl/config/archs/linux64-mips64/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux64-mips64/no-asm/configdata.pm @@ -62,7 +62,7 @@ our %config = ( options => "enable-ssl-trace no-afalgeng no-asan no-asm no-buildtest-c++ no-comp no-crypto-mdebug no-crypto-mdebug-backtrace no-devcryptoeng no-dynamic-engine no-ec_nistp_64_gcc_128 no-egd no-external-tests no-fuzz-afl no-fuzz-libfuzzer no-heartbeats no-md2 no-msan no-rc5 no-sctp no-shared no-ssl3 no-ssl3-method no-ubsan no-unit-test no-weak-ssl-ciphers no-zlib no-zlib-dynamic", perl_archname => "x86_64-linux-gnu-thread-multi", perl_cmd => "/usr/bin/perl", - perl_version => "5.28.1", + perl_version => "5.26.1", perlargv => [ "no-comp", "no-shared", "no-afalgeng", "enable-ssl-trace", "no-asm", "linux64-mips64" ], perlenv => { "AR" => undef, @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux64-mips64", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( @@ -15324,3 +15324,4 @@ Verbose output. =back =cut + diff --git a/deps/openssl/config/archs/linux64-mips64/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux64-mips64/no-asm/crypto/buildinf.h index f58b88ffe332db..ccc6b1170b9199 100644 --- a/deps/openssl/config/archs/linux64-mips64/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux64-mips64/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux64-mips64" -#define DATE "built on: Wed Mar 18 21:09:46 2020 UTC" +#define DATE "built on: Tue Mar 31 13:08:22 2020 UTC" /* * Generate compiler_flags as an array of individual characters.
This is a diff --git a/deps/openssl/config/archs/linux64-s390x/asm/configdata.pm b/deps/openssl/config/archs/linux64-s390x/asm/configdata.pm index 0977c59b656c49..c61d434ab44595 100644 --- a/deps/openssl/config/archs/linux64-s390x/asm/configdata.pm +++ b/deps/openssl/config/archs/linux64-s390x/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux64-s390x", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux64-s390x/asm/crypto/buildinf.h b/deps/openssl/config/archs/linux64-s390x/asm/crypto/buildinf.h index 8e1f78551c0895..f25959528c44be 100644 --- a/deps/openssl/config/archs/linux64-s390x/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux64-s390x/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux64-s390x" -#define DATE "built on: Mon Mar 23 17:57:57 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:44 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/linux64-s390x/asm_avx2/configdata.pm b/deps/openssl/config/archs/linux64-s390x/asm_avx2/configdata.pm index 2020c38589d592..04b55956b4cff4 100644 --- a/deps/openssl/config/archs/linux64-s390x/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/linux64-s390x/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux64-s390x", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux64-s390x/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/linux64-s390x/asm_avx2/crypto/buildinf.h index 2a56e2351033f8..4f8f89dddbd306 100644 --- a/deps/openssl/config/archs/linux64-s390x/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux64-s390x/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux64-s390x" -#define DATE "built on: Mon Mar 23 17:58:08 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:46 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/linux64-s390x/no-asm/configdata.pm b/deps/openssl/config/archs/linux64-s390x/no-asm/configdata.pm index 444bbb536ad023..e6d0616af6824e 100644 --- a/deps/openssl/config/archs/linux64-s390x/no-asm/configdata.pm +++ b/deps/openssl/config/archs/linux64-s390x/no-asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "linux64-s390x", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/linux64-s390x/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/linux64-s390x/no-asm/crypto/buildinf.h index 5e1ab4482bdc3c..a24755c3712ab3 100644 --- a/deps/openssl/config/archs/linux64-s390x/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/linux64-s390x/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: linux64-s390x" -#define DATE "built on: Mon Mar 23 17:58:38 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:49 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/solaris-x86-gcc/asm/configdata.pm b/deps/openssl/config/archs/solaris-x86-gcc/asm/configdata.pm index fb5d39df0fabd4..c61742b743fac8 100644 --- a/deps/openssl/config/archs/solaris-x86-gcc/asm/configdata.pm +++ b/deps/openssl/config/archs/solaris-x86-gcc/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "solaris-x86-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/solaris-x86-gcc/asm/crypto/buildinf.h b/deps/openssl/config/archs/solaris-x86-gcc/asm/crypto/buildinf.h index ae75bf7fa7a976..5ebbad083a7d58 100644 --- a/deps/openssl/config/archs/solaris-x86-gcc/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/solaris-x86-gcc/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: solaris-x86-gcc" -#define DATE "built on: Mon Mar 23 17:58:57 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:51 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/configdata.pm b/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/configdata.pm index 5b971fcf2ceb8c..d6aeaa19e9e007 100644 --- a/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "solaris-x86-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/crypto/buildinf.h index 1b9ba24aa04173..577790970225fa 100644 --- a/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/solaris-x86-gcc/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: solaris-x86-gcc" -#define DATE "built on: Mon Mar 23 17:59:10 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:55 2020 UTC" /* * Generate compiler_flags as an array of individual characters. This is a diff --git a/deps/openssl/config/archs/solaris-x86-gcc/no-asm/configdata.pm b/deps/openssl/config/archs/solaris-x86-gcc/no-asm/configdata.pm index c8cd56b7a2736e..57ea74cf2b3c17 100644 --- a/deps/openssl/config/archs/solaris-x86-gcc/no-asm/configdata.pm +++ b/deps/openssl/config/archs/solaris-x86-gcc/no-asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "solaris-x86-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/solaris-x86-gcc/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/solaris-x86-gcc/no-asm/crypto/buildinf.h index 0030a3da46464f..9acc66330ae0f7 100644 --- a/deps/openssl/config/archs/solaris-x86-gcc/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/solaris-x86-gcc/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: solaris-x86-gcc" -#define DATE "built on: Mon Mar 23 17:59:26 2020 UTC" +#define DATE "built on: Wed Apr 1 16:08:59 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/configdata.pm b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/configdata.pm index 78c4b0e68f7e6b..91ad8ea48bbe5c 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/configdata.pm +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "solaris64-x86_64-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha1-x86_64.s index 978bd2b6239c15..a38e21f0484e24 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha1-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha1_enc,@function .align 32 aesni_cbc_sha1_enc: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+0(%rip),%r10d movq OPENSSL_ia32cap_P+4(%rip),%r11 @@ -18,7 +18,7 @@ aesni_cbc_sha1_enc: je aesni_cbc_sha1_enc_avx jmp aesni_cbc_sha1_enc_ssse3 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc .type aesni_cbc_sha1_enc_ssse3,@function .align 32 @@ -2732,7 +2732,7 @@ K_XX_XX: .type aesni_cbc_sha1_enc_shaext,@function .align 32 aesni_cbc_sha1_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 movdqu (%r9),%xmm8 movd 16(%r9),%xmm9 @@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext: movdqu %xmm8,(%r9) movd %xmm9,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha256-x86_64.s index dd09f1b290af62..3e56a82578a354 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha256-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha256_enc,@function .align 16 aesni_cbc_sha256_enc: -.cfi_startproc +.cfi_startproc leaq OPENSSL_ia32cap_P(%rip),%r11 movl $1,%eax cmpq $0,%rdi @@ -31,7 +31,7 @@ aesni_cbc_sha256_enc: ud2 .Lprobe: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc .align 64 @@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2: .type aesni_cbc_sha256_enc_shaext,@function .align 32 aesni_cbc_sha256_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 leaq K256+128(%rip),%rax movdqu (%r9),%xmm1 @@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext: movdqu %xmm1,(%r9) movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-x86_64.s index c1e791eff59235..1a4b22e7b8a5d9 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-x86_64.s @@ -861,7 +861,7 @@ aesni_ecb_encrypt: .type aesni_ccm64_encrypt_blocks,@function .align 16 aesni_ccm64_encrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movdqu (%r8),%xmm6 movdqa 
.Lincrement64(%rip),%xmm9 @@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks .globl aesni_ccm64_decrypt_blocks .type aesni_ccm64_decrypt_blocks,@function .align 16 aesni_ccm64_decrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movups (%r8),%xmm6 movdqu (%r9),%xmm3 @@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks .globl aesni_ctr32_encrypt_blocks .type aesni_ctr32_encrypt_blocks,@function @@ -2794,7 +2794,7 @@ aesni_ocb_encrypt: .type __ocb_encrypt6,@function .align 32 __ocb_encrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2892,13 +2892,13 @@ __ocb_encrypt6: .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt6,.-__ocb_encrypt6 .type __ocb_encrypt4,@function .align 32 __ocb_encrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2963,13 +2963,13 @@ __ocb_encrypt4: .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt4,.-__ocb_encrypt4 .type __ocb_encrypt1,@function .align 32 __ocb_encrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm2,%xmm8 @@ -3000,7 +3000,7 @@ __ocb_encrypt1: .byte 102,15,56,221,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt1,.-__ocb_encrypt1 .globl aesni_ocb_decrypt @@ -3243,7 +3243,7 @@ aesni_ocb_decrypt: .type __ocb_decrypt6,@function .align 32 __ocb_decrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3335,13 +3335,13 @@ __ocb_decrypt6: .byte 102,65,15,56,223,246 .byte 102,65,15,56,223,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt6,.-__ocb_decrypt6 .type __ocb_decrypt4,@function .align 32 __ocb_decrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3402,13 +3402,13 @@ __ocb_decrypt4: .byte 102,65,15,56,223,228 .byte 102,65,15,56,223,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt4,.-__ocb_decrypt4 .type __ocb_decrypt1,@function .align 32 __ocb_decrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm7,%xmm2 @@ -3438,7 +3438,7 @@ __ocb_decrypt1: .byte 102,15,56,223,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt1,.-__ocb_decrypt1 .globl aesni_cbc_encrypt .type aesni_cbc_encrypt,@function @@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_set_encrypt_key,.-aesni_set_encrypt_key .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key .align 64 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/rsaz-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/rsaz-x86_64.s index 7876e0b8f93d9c..d5025b23cd668e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/rsaz-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/rsaz-x86_64.s @@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one: .type __rsaz_512_reduce,@function .align 32 __rsaz_512_reduce: -.cfi_startproc +.cfi_startproc movq %r8,%rbx imulq 128+8(%rsp),%rbx movq 
0(%rbp),%rax @@ -1533,12 +1533,12 @@ __rsaz_512_reduce: jne .Lreduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reduce,.-__rsaz_512_reduce .type __rsaz_512_reducex,@function .align 32 __rsaz_512_reducex: -.cfi_startproc +.cfi_startproc imulq %r8,%rdx xorq %rsi,%rsi @@ -1591,12 +1591,12 @@ __rsaz_512_reducex: jne .Lreduction_loopx .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reducex,.-__rsaz_512_reducex .type __rsaz_512_subtract,@function .align 32 __rsaz_512_subtract: -.cfi_startproc +.cfi_startproc movq %r8,(%rdi) movq %r9,8(%rdi) movq %r10,16(%rdi) @@ -1650,12 +1650,12 @@ __rsaz_512_subtract: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_subtract,.-__rsaz_512_subtract .type __rsaz_512_mul,@function .align 32 __rsaz_512_mul: -.cfi_startproc +.cfi_startproc leaq 8(%rsp),%rdi movq (%rsi),%rax @@ -1794,12 +1794,12 @@ __rsaz_512_mul: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mul,.-__rsaz_512_mul .type __rsaz_512_mulx,@function .align 32 __rsaz_512_mulx: -.cfi_startproc +.cfi_startproc mulxq (%rsi),%rbx,%r8 movq $-6,%rcx @@ -1916,13 +1916,13 @@ __rsaz_512_mulx: movq %r15,8+64+56(%rsp) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mulx,.-__rsaz_512_mulx .globl rsaz_512_scatter4 .type rsaz_512_scatter4,@function .align 16 rsaz_512_scatter4: -.cfi_startproc +.cfi_startproc leaq (%rdi,%rdx,8),%rdi movl $8,%r9d jmp .Loop_scatter @@ -1935,14 +1935,14 @@ rsaz_512_scatter4: decl %r9d jnz .Loop_scatter .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size rsaz_512_scatter4,.-rsaz_512_scatter4 .globl rsaz_512_gather4 .type rsaz_512_gather4,@function .align 16 rsaz_512_gather4: -.cfi_startproc +.cfi_startproc movd %edx,%xmm8 movdqa .Linc+16(%rip),%xmm1 movdqa .Linc(%rip),%xmm0 @@ -2006,7 +2006,7 @@ rsaz_512_gather4: jnz .Loop_gather .byte 0xf3,0xc3 .LSEH_end_rsaz_512_gather4: -.cfi_endproc +.cfi_endproc .size rsaz_512_gather4,.-rsaz_512_gather4 .align 64 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/x86_64-mont5.s index 40a60a3c8fc6b9..ab93b02d8c1aae 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/x86_64-mont5.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/bn/x86_64-mont5.s @@ -550,7 +550,7 @@ bn_mul4x_mont_gather5: .type mul4x_internal,@function .align 32 mul4x_internal: -.cfi_startproc +.cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq .Linc(%rip),%rax @@ -1072,7 +1072,7 @@ mul4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry -.cfi_endproc +.cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5 .type bn_power5,@function @@ -1215,7 +1215,7 @@ bn_power5: .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: -.cfi_startproc +.cfi_startproc @@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction: cmpq %rdx,%rdi jb .L8x_reduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_sqr8x_internal,.-bn_sqr8x_internal .type __bn_post4x_internal,@function .align 32 __bn_post4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx @@ -2046,18 +2046,18 @@ __bn_post4x_internal: movq %r9,%r10 negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_from_montgomery .type bn_from_montgomery,@function .align 32 bn_from_montgomery: -.cfi_startproc +.cfi_startproc testl $7,%r9d jz bn_from_mont8x xorl %eax,%eax .byte 
0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_from_montgomery,.-bn_from_montgomery .type bn_from_mont8x,@function @@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5: .type mulx4x_internal,@function .align 32 mulx4x_internal: -.cfi_startproc +.cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 @@ -2760,7 +2760,7 @@ mulx4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry -.cfi_endproc +.cfi_endproc .size mulx4x_internal,.-mulx4x_internal .type bn_powerx5,@function .align 32 @@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction: .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 __bn_postx4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 @@ -3567,13 +3567,13 @@ __bn_postx4x_internal: negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_get_bits5 .type bn_get_bits5,@function .align 16 bn_get_bits5: -.cfi_startproc +.cfi_startproc leaq 0(%rdi),%r10 leaq 1(%rdi),%r11 movl %esi,%ecx @@ -3587,14 +3587,14 @@ bn_get_bits5: shrl %cl,%eax andl $31,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_get_bits5,.-bn_get_bits5 .globl bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: -.cfi_startproc +.cfi_startproc cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx @@ -3607,7 +3607,7 @@ bn_scatter5: jnz .Lscatter .Lscatter_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 @@ -3615,7 +3615,7 @@ bn_scatter5: .align 32 bn_gather5: .LSEH_begin_bn_gather5: -.cfi_startproc +.cfi_startproc .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 @@ -3773,7 +3773,7 @@ bn_gather5: leaq (%r10),%rsp .byte 0xf3,0xc3 .LSEH_end_bn_gather5: -.cfi_endproc +.cfi_endproc .size bn_gather5,.-bn_gather5 .align 64 .Linc: diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/buildinf.h b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/buildinf.h index 1c5892985db2dc..87cb3a0a8f6502 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: solaris64-x86_64-gcc" -#define DATE "built on: Mon Mar 23 17:59:38 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:01 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/camellia/cmll-x86_64.s index eeb20dd2291da7..92056f8b1e0e0e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/camellia/cmll-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/camellia/cmll-x86_64.s @@ -5,13 +5,13 @@ .type Camellia_EncryptBlock,@function .align 16 Camellia_EncryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Lenc_rounds -.cfi_endproc +.cfi_endproc .size Camellia_EncryptBlock,.-Camellia_EncryptBlock .globl Camellia_EncryptBlock_Rounds @@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds: .type _x86_64_Camellia_encrypt,@function .align 16 _x86_64_Camellia_encrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt: movl %edx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt @@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt: .type Camellia_DecryptBlock,@function .align 16 Camellia_DecryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Ldec_rounds -.cfi_endproc +.cfi_endproc .size Camellia_DecryptBlock,.-Camellia_DecryptBlock .globl Camellia_DecryptBlock_Rounds @@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds: .type _x86_64_Camellia_decrypt,@function .align 16 _x86_64_Camellia_decrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt: movl %ebx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt .globl Camellia_Ekeygen .type Camellia_Ekeygen,@function diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/ecp_nistz256-x86_64.s index 5c9e4050416212..80569cae04667e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/ecp_nistz256-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/ecp_nistz256-x86_64.s @@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx: .type ecp_nistz256_to_mont,@function .align 32 ecp_nistz256_to_mont: -.cfi_startproc +.cfi_startproc movl $0x80100,%ecx andl OPENSSL_ia32cap_P+8(%rip),%ecx leaq .LRR(%rip),%rdx jmp .Lmul_mont -.cfi_endproc +.cfi_endproc .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont @@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont: .type ecp_nistz256_scatter_w5,@function .align 32 ecp_nistz256_scatter_w5: -.cfi_startproc +.cfi_startproc leal -3(%rdx,%rdx,2),%edx movdqa 0(%rsi),%xmm0 shll $5,%edx @@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5: movdqa %xmm5,80(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5 @@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5: .type ecp_nistz256_scatter_w7,@function .align 32 ecp_nistz256_scatter_w7: -.cfi_startproc +.cfi_startproc movdqu 0(%rsi),%xmm0 shll $6,%edx movdqu 16(%rsi),%xmm1 @@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7: movdqa %xmm3,48(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/x25519-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/x25519-x86_64.s index 
1788e568cda5d2..8fd319c83c880d 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/x25519-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/ec/x25519-x86_64.s @@ -400,14 +400,14 @@ x25519_fe51_mul121666: .type x25519_fe64_eligible,@function .align 32 x25519_fe64_eligible: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+8(%rip),%ecx xorl %eax,%eax andl $0x80100,%ecx cmpl $0x80100,%ecx cmovel %ecx,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_eligible,.-x25519_fe64_eligible .globl x25519_fe64_mul @@ -650,7 +650,7 @@ x25519_fe64_sqr: .align 32 x25519_fe64_mul121666: .Lfe64_mul121666_body: -.cfi_startproc +.cfi_startproc movl $121666,%edx mulxq 0(%rsi),%r8,%rcx mulxq 8(%rsi),%r9,%rax @@ -679,7 +679,7 @@ x25519_fe64_mul121666: .Lfe64_mul121666_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_mul121666,.-x25519_fe64_mul121666 .globl x25519_fe64_add @@ -687,7 +687,7 @@ x25519_fe64_mul121666: .align 32 x25519_fe64_add: .Lfe64_add_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -716,7 +716,7 @@ x25519_fe64_add: .Lfe64_add_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_add,.-x25519_fe64_add .globl x25519_fe64_sub @@ -724,7 +724,7 @@ x25519_fe64_add: .align 32 x25519_fe64_sub: .Lfe64_sub_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -753,7 +753,7 @@ x25519_fe64_sub: .Lfe64_sub_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_sub,.-x25519_fe64_sub .globl x25519_fe64_tobytes @@ -761,7 +761,7 @@ x25519_fe64_sub: .align 32 x25519_fe64_tobytes: .Lfe64_to_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -797,6 +797,6 @@ x25519_fe64_tobytes: .Lfe64_to_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_tobytes,.-x25519_fe64_tobytes .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/modes/aesni-gcm-x86_64.s index 01d89630a42f73..bf508aff6ff6ec 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/modes/aesni-gcm-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/modes/aesni-gcm-x86_64.s @@ -3,7 +3,7 @@ .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: -.cfi_startproc +.cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 @@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x: vpxor %xmm4,%xmm8,%xmm8 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .type aesni_gcm_decrypt,@function @@ -418,7 +418,7 @@ aesni_gcm_decrypt: .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: -.cfi_startproc +.cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%rbp),%r13 @@ -505,7 +505,7 @@ _aesni_ctr32_6x: vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/poly1305/poly1305-x86_64.s 
b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/poly1305/poly1305-x86_64.s index 987a65aab38147..9bb9be4632f53c 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/poly1305/poly1305-x86_64.s @@ -12,7 +12,7 @@ .type poly1305_init,@function .align 32 poly1305_init: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -48,7 +48,7 @@ poly1305_init: movl $1,%eax .Lno_key: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init,.-poly1305_init .type poly1305_blocks,@function @@ -169,7 +169,7 @@ poly1305_blocks: .type poly1305_emit,@function .align 32 poly1305_emit: -.cfi_startproc +.cfi_startproc .Lemit: movq 0(%rdi),%r8 movq 8(%rdi),%r9 @@ -190,12 +190,12 @@ poly1305_emit: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit,.-poly1305_emit .type __poly1305_block,@function .align 32 __poly1305_block: -.cfi_startproc +.cfi_startproc mulq %r14 movq %rax,%r9 movq %r11,%rax @@ -235,13 +235,13 @@ __poly1305_block: adcq $0,%rbx adcq $0,%rbp .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_block,.-__poly1305_block .type __poly1305_init_avx,@function .align 32 __poly1305_init_avx: -.cfi_startproc +.cfi_startproc movq %r11,%r14 movq %r12,%rbx xorq %rbp,%rbp @@ -399,7 +399,7 @@ __poly1305_init_avx: leaq -48-64(%rdi),%rdi .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_init_avx,.-__poly1305_init_avx .type poly1305_blocks_avx,@function @@ -1240,7 +1240,7 @@ poly1305_blocks_avx: .type poly1305_emit_avx,@function .align 32 poly1305_emit_avx: -.cfi_startproc +.cfi_startproc cmpl $0,20(%rdi) je .Lemit @@ -1291,7 +1291,7 @@ poly1305_emit_avx: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_avx,.-poly1305_emit_avx .type poly1305_blocks_avx2,@function .align 32 @@ -2488,7 +2488,7 @@ poly1305_blocks_avx512: .type poly1305_init_base2_44,@function .align 32 poly1305_init_base2_44: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -2522,12 +2522,12 @@ poly1305_init_base2_44: movq %r11,8(%rdx) movl $1,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init_base2_44,.-poly1305_init_base2_44 .type poly1305_blocks_vpmadd52,@function .align 32 poly1305_blocks_vpmadd52: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52 @@ -2634,12 +2634,12 @@ poly1305_blocks_vpmadd52: .Lno_data_vpmadd52: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 .type poly1305_blocks_vpmadd52_4x,@function .align 32 poly1305_blocks_vpmadd52_4x: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52_4x @@ -3064,12 +3064,12 @@ poly1305_blocks_vpmadd52_4x: .Lno_data_vpmadd52_4x: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x .type poly1305_blocks_vpmadd52_8x,@function .align 32 poly1305_blocks_vpmadd52_8x: -.cfi_startproc +.cfi_startproc shrq $4,%rdx jz .Lno_data_vpmadd52_8x @@ -3410,12 +3410,12 @@ poly1305_blocks_vpmadd52_8x: .Lno_data_vpmadd52_8x: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x .type poly1305_emit_base2_44,@function .align 32 poly1305_emit_base2_44: -.cfi_startproc +.cfi_startproc movq 0(%rdi),%r8 movq 8(%rdi),%r9 movq 16(%rdi),%r10 @@ -3446,7 +3446,7 @@ poly1305_emit_base2_44: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc 
.size poly1305_emit_base2_44,.-poly1305_emit_base2_44 .align 64 .Lconst: @@ -3485,7 +3485,7 @@ poly1305_emit_base2_44: .type xor128_encrypt_n_pad,@function .align 16 xor128_encrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -3527,14 +3527,14 @@ xor128_encrypt_n_pad: .Ldone_enc: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad .globl xor128_decrypt_n_pad .type xor128_decrypt_n_pad,@function .align 16 xor128_decrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -3580,5 +3580,5 @@ xor128_decrypt_n_pad: .Ldone_dec: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/rc4/rc4-x86_64.s index b97c757550aad0..d1d1eece70bf1e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/rc4/rc4-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/rc4/rc4-x86_64.s @@ -5,7 +5,7 @@ .type RC4,@function .align 16 RC4: -.cfi_startproc +.cfi_startproc orq %rsi,%rsi jne .Lentry .byte 0xf3,0xc3 @@ -534,7 +534,7 @@ RC4: .type RC4_set_key,@function .align 16 RC4_set_key: -.cfi_startproc +.cfi_startproc leaq 8(%rdi),%rdi leaq (%rdx,%rsi,1),%rdx negq %rsi @@ -601,14 +601,14 @@ RC4_set_key: movl %eax,-8(%rdi) movl %eax,-4(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size RC4_set_key,.-RC4_set_key .globl RC4_options .type RC4_options,@function .align 16 RC4_options: -.cfi_startproc +.cfi_startproc leaq .Lopts(%rip),%rax movl OPENSSL_ia32cap_P(%rip),%edx btl $20,%edx @@ -621,7 +621,7 @@ RC4_options: addq $12,%rax .Ldone: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .align 64 .Lopts: .byte 114,99,52,40,56,120,44,105,110,116,41,0 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/keccak1600-x86_64.s index 09617d014bdb7b..11f26e933d80b5 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/keccak1600-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/keccak1600-x86_64.s @@ -3,7 +3,7 @@ .type __KeccakF1600,@function .align 32 __KeccakF1600: -.cfi_startproc +.cfi_startproc movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx @@ -256,7 +256,7 @@ __KeccakF1600: leaq -192(%r15),%r15 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .type KeccakF1600,@function diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha1-x86_64.s index 98541727e555da..d4efc7206f57b3 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha1-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha1-x86_64.s @@ -1422,7 +1422,7 @@ _shaext_shortcut: movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext .type sha1_block_data_order_ssse3,@function .align 16 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha256-x86_64.s index 9357385da3c49b..a7b60900fdd061 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha256-x86_64.s 
+++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha256-x86_64.s @@ -1775,7 +1775,7 @@ K256: .align 64 sha256_block_data_order_shaext: _shaext_shortcut: -.cfi_startproc +.cfi_startproc leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 @@ -1978,7 +1978,7 @@ _shaext_shortcut: movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext .type sha256_block_data_order_ssse3,@function .align 64 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/x86_64cpuid.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/x86_64cpuid.s index 9268ce8c9a9d63..748e6d161fa24a 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/crypto/x86_64cpuid.s @@ -12,7 +12,7 @@ .type OPENSSL_atomic_add,@function .align 16 OPENSSL_atomic_add: -.cfi_startproc +.cfi_startproc movl (%rdi),%eax .Lspin: leaq (%rsi,%rax,1),%r8 .byte 0xf0 @@ -21,19 +21,19 @@ OPENSSL_atomic_add: movl %r8d,%eax .byte 0x48,0x98 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_atomic_add,.-OPENSSL_atomic_add .globl OPENSSL_rdtsc .type OPENSSL_rdtsc,@function .align 16 OPENSSL_rdtsc: -.cfi_startproc +.cfi_startproc rdtsc shlq $32,%rdx orq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_rdtsc,.-OPENSSL_rdtsc .globl OPENSSL_ia32_cpuid @@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid: .type OPENSSL_cleanse,@function .align 16 OPENSSL_cleanse: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $15,%rsi jae .Lot @@ -239,14 +239,14 @@ OPENSSL_cleanse: cmpq $0,%rsi jne .Little .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_cleanse,.-OPENSSL_cleanse .globl CRYPTO_memcmp .type CRYPTO_memcmp,@function .align 16 CRYPTO_memcmp: -.cfi_startproc +.cfi_startproc xorq %rax,%rax xorq %r10,%r10 cmpq $0,%rdx @@ -275,13 +275,13 @@ CRYPTO_memcmp: shrq $63,%rax .Lno_data: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size CRYPTO_memcmp,.-CRYPTO_memcmp .globl OPENSSL_wipe_cpu .type OPENSSL_wipe_cpu,@function .align 16 OPENSSL_wipe_cpu: -.cfi_startproc +.cfi_startproc pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 @@ -308,13 +308,13 @@ OPENSSL_wipe_cpu: xorq %r11,%r11 leaq 8(%rsp),%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu .globl OPENSSL_instrument_bus .type OPENSSL_instrument_bus,@function .align 16 OPENSSL_instrument_bus: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rsi,%r11 @@ -341,14 +341,14 @@ OPENSSL_instrument_bus: movq %r11,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus .globl OPENSSL_instrument_bus2 .type OPENSSL_instrument_bus2,@function .align 16 OPENSSL_instrument_bus2: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rdx,%r11 @@ -391,13 +391,13 @@ OPENSSL_instrument_bus2: movq 8(%rsp),%rax subq %rcx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2 .globl OPENSSL_ia32_rdrand_bytes .type OPENSSL_ia32_rdrand_bytes,@function .align 16 OPENSSL_ia32_rdrand_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdrand_bytes @@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes: .Ldone_rdrand_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes .globl OPENSSL_ia32_rdseed_bytes .type 
OPENSSL_ia32_rdseed_bytes,@function .align 16 OPENSSL_ia32_rdseed_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdseed_bytes @@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes: .Ldone_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/configdata.pm b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/configdata.pm index d2b94ec9e92b3b..06f2fa80a88f7e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/configdata.pm +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/configdata.pm @@ -111,8 +111,8 @@ our %config = ( sourcedir => ".", target => "solaris64-x86_64-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s index 978bd2b6239c15..a38e21f0484e24 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha1-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha1_enc,@function .align 32 aesni_cbc_sha1_enc: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+0(%rip),%r10d movq OPENSSL_ia32cap_P+4(%rip),%r11 @@ -18,7 +18,7 @@ aesni_cbc_sha1_enc: je aesni_cbc_sha1_enc_avx jmp aesni_cbc_sha1_enc_ssse3 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc .type aesni_cbc_sha1_enc_ssse3,@function .align 32 @@ -2732,7 +2732,7 @@ K_XX_XX: .type aesni_cbc_sha1_enc_shaext,@function .align 32 aesni_cbc_sha1_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 movdqu (%r9),%xmm8 movd 16(%r9),%xmm9 @@ -3031,5 +3031,5 @@ aesni_cbc_sha1_enc_shaext: movdqu %xmm8,(%r9) movd %xmm9,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s index dd09f1b290af62..3e56a82578a354 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-sha256-x86_64.s @@ -5,7 +5,7 @@ .type aesni_cbc_sha256_enc,@function .align 16 aesni_cbc_sha256_enc: -.cfi_startproc +.cfi_startproc leaq OPENSSL_ia32cap_P(%rip),%r11 movl $1,%eax cmpq $0,%rdi @@ -31,7 +31,7 @@ aesni_cbc_sha256_enc: ud2 .Lprobe: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc .align 64 @@ -4081,7 +4081,7 @@ aesni_cbc_sha256_enc_avx2: .type aesni_cbc_sha256_enc_shaext,@function .align 32 aesni_cbc_sha256_enc_shaext: -.cfi_startproc +.cfi_startproc movq 8(%rsp),%r10 leaq K256+128(%rip),%rax movdqu (%r9),%xmm1 @@ -4431,5 +4431,5 @@ aesni_cbc_sha256_enc_shaext: movdqu %xmm1,(%r9) movdqu %xmm2,16(%r9) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_cbc_sha256_enc_shaext,.-aesni_cbc_sha256_enc_shaext diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-x86_64.s index 
c1e791eff59235..1a4b22e7b8a5d9 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/aes/aesni-x86_64.s @@ -861,7 +861,7 @@ aesni_ecb_encrypt: .type aesni_ccm64_encrypt_blocks,@function .align 16 aesni_ccm64_encrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movdqu (%r8),%xmm6 movdqa .Lincrement64(%rip),%xmm9 @@ -920,13 +920,13 @@ aesni_ccm64_encrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks .globl aesni_ccm64_decrypt_blocks .type aesni_ccm64_decrypt_blocks,@function .align 16 aesni_ccm64_decrypt_blocks: -.cfi_startproc +.cfi_startproc movl 240(%rcx),%eax movups (%r8),%xmm6 movdqu (%r9),%xmm3 @@ -1019,7 +1019,7 @@ aesni_ccm64_decrypt_blocks: pxor %xmm8,%xmm8 pxor %xmm6,%xmm6 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks .globl aesni_ctr32_encrypt_blocks .type aesni_ctr32_encrypt_blocks,@function @@ -2794,7 +2794,7 @@ aesni_ocb_encrypt: .type __ocb_encrypt6,@function .align 32 __ocb_encrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2892,13 +2892,13 @@ __ocb_encrypt6: .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt6,.-__ocb_encrypt6 .type __ocb_encrypt4,@function .align 32 __ocb_encrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -2963,13 +2963,13 @@ __ocb_encrypt4: .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt4,.-__ocb_encrypt4 .type __ocb_encrypt1,@function .align 32 __ocb_encrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm2,%xmm8 @@ -3000,7 +3000,7 @@ __ocb_encrypt1: .byte 102,15,56,221,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_encrypt1,.-__ocb_encrypt1 .globl aesni_ocb_decrypt @@ -3243,7 +3243,7 @@ aesni_ocb_decrypt: .type __ocb_decrypt6,@function .align 32 __ocb_decrypt6: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3335,13 +3335,13 @@ __ocb_decrypt6: .byte 102,65,15,56,223,246 .byte 102,65,15,56,223,255 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt6,.-__ocb_decrypt6 .type __ocb_decrypt4,@function .align 32 __ocb_decrypt4: -.cfi_startproc +.cfi_startproc pxor %xmm9,%xmm15 movdqu (%rbx,%r12,1),%xmm11 movdqa %xmm10,%xmm12 @@ -3402,13 +3402,13 @@ __ocb_decrypt4: .byte 102,65,15,56,223,228 .byte 102,65,15,56,223,237 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt4,.-__ocb_decrypt4 .type __ocb_decrypt1,@function .align 32 __ocb_decrypt1: -.cfi_startproc +.cfi_startproc pxor %xmm15,%xmm7 pxor %xmm9,%xmm7 pxor %xmm7,%xmm2 @@ -3438,7 +3438,7 @@ __ocb_decrypt1: .byte 102,15,56,223,215 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __ocb_decrypt1,.-__ocb_decrypt1 .globl aesni_cbc_encrypt .type aesni_cbc_encrypt,@function @@ -4447,7 +4447,7 @@ __aesni_set_encrypt_key: shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size aesni_set_encrypt_key,.-aesni_set_encrypt_key .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key .align 64 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/rsaz-x86_64.s 
b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/rsaz-x86_64.s index 7876e0b8f93d9c..d5025b23cd668e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/rsaz-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/rsaz-x86_64.s @@ -1453,7 +1453,7 @@ rsaz_512_mul_by_one: .type __rsaz_512_reduce,@function .align 32 __rsaz_512_reduce: -.cfi_startproc +.cfi_startproc movq %r8,%rbx imulq 128+8(%rsp),%rbx movq 0(%rbp),%rax @@ -1533,12 +1533,12 @@ __rsaz_512_reduce: jne .Lreduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reduce,.-__rsaz_512_reduce .type __rsaz_512_reducex,@function .align 32 __rsaz_512_reducex: -.cfi_startproc +.cfi_startproc imulq %r8,%rdx xorq %rsi,%rsi @@ -1591,12 +1591,12 @@ __rsaz_512_reducex: jne .Lreduction_loopx .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_reducex,.-__rsaz_512_reducex .type __rsaz_512_subtract,@function .align 32 __rsaz_512_subtract: -.cfi_startproc +.cfi_startproc movq %r8,(%rdi) movq %r9,8(%rdi) movq %r10,16(%rdi) @@ -1650,12 +1650,12 @@ __rsaz_512_subtract: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_subtract,.-__rsaz_512_subtract .type __rsaz_512_mul,@function .align 32 __rsaz_512_mul: -.cfi_startproc +.cfi_startproc leaq 8(%rsp),%rdi movq (%rsi),%rax @@ -1794,12 +1794,12 @@ __rsaz_512_mul: movq %r15,56(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mul,.-__rsaz_512_mul .type __rsaz_512_mulx,@function .align 32 __rsaz_512_mulx: -.cfi_startproc +.cfi_startproc mulxq (%rsi),%rbx,%r8 movq $-6,%rcx @@ -1916,13 +1916,13 @@ __rsaz_512_mulx: movq %r15,8+64+56(%rsp) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __rsaz_512_mulx,.-__rsaz_512_mulx .globl rsaz_512_scatter4 .type rsaz_512_scatter4,@function .align 16 rsaz_512_scatter4: -.cfi_startproc +.cfi_startproc leaq (%rdi,%rdx,8),%rdi movl $8,%r9d jmp .Loop_scatter @@ -1935,14 +1935,14 @@ rsaz_512_scatter4: decl %r9d jnz .Loop_scatter .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size rsaz_512_scatter4,.-rsaz_512_scatter4 .globl rsaz_512_gather4 .type rsaz_512_gather4,@function .align 16 rsaz_512_gather4: -.cfi_startproc +.cfi_startproc movd %edx,%xmm8 movdqa .Linc+16(%rip),%xmm1 movdqa .Linc(%rip),%xmm0 @@ -2006,7 +2006,7 @@ rsaz_512_gather4: jnz .Loop_gather .byte 0xf3,0xc3 .LSEH_end_rsaz_512_gather4: -.cfi_endproc +.cfi_endproc .size rsaz_512_gather4,.-rsaz_512_gather4 .align 64 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/x86_64-mont5.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/x86_64-mont5.s index 40a60a3c8fc6b9..ab93b02d8c1aae 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/x86_64-mont5.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/bn/x86_64-mont5.s @@ -550,7 +550,7 @@ bn_mul4x_mont_gather5: .type mul4x_internal,@function .align 32 mul4x_internal: -.cfi_startproc +.cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq .Linc(%rip),%rax @@ -1072,7 +1072,7 @@ mul4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry -.cfi_endproc +.cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5 .type bn_power5,@function @@ -1215,7 +1215,7 @@ bn_power5: .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: -.cfi_startproc +.cfi_startproc @@ -1990,12 +1990,12 @@ __bn_sqr8x_reduction: cmpq %rdx,%rdi jb .L8x_reduction_loop .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_sqr8x_internal,.-bn_sqr8x_internal .type 
__bn_post4x_internal,@function .align 32 __bn_post4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx @@ -2046,18 +2046,18 @@ __bn_post4x_internal: movq %r9,%r10 negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_from_montgomery .type bn_from_montgomery,@function .align 32 bn_from_montgomery: -.cfi_startproc +.cfi_startproc testl $7,%r9d jz bn_from_mont8x xorl %eax,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_from_montgomery,.-bn_from_montgomery .type bn_from_mont8x,@function @@ -2341,7 +2341,7 @@ bn_mulx4x_mont_gather5: .type mulx4x_internal,@function .align 32 mulx4x_internal: -.cfi_startproc +.cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 @@ -2760,7 +2760,7 @@ mulx4x_internal: movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry -.cfi_endproc +.cfi_endproc .size mulx4x_internal,.-mulx4x_internal .type bn_powerx5,@function .align 32 @@ -3519,7 +3519,7 @@ __bn_sqrx8x_reduction: .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 __bn_postx4x_internal: -.cfi_startproc +.cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 @@ -3567,13 +3567,13 @@ __bn_postx4x_internal: negq %r9 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_get_bits5 .type bn_get_bits5,@function .align 16 bn_get_bits5: -.cfi_startproc +.cfi_startproc leaq 0(%rdi),%r10 leaq 1(%rdi),%r11 movl %esi,%ecx @@ -3587,14 +3587,14 @@ bn_get_bits5: shrl %cl,%eax andl $31,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_get_bits5,.-bn_get_bits5 .globl bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: -.cfi_startproc +.cfi_startproc cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx @@ -3607,7 +3607,7 @@ bn_scatter5: jnz .Lscatter .Lscatter_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 @@ -3615,7 +3615,7 @@ bn_scatter5: .align 32 bn_gather5: .LSEH_begin_bn_gather5: -.cfi_startproc +.cfi_startproc .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 @@ -3773,7 +3773,7 @@ bn_gather5: leaq (%r10),%rsp .byte 0xf3,0xc3 .LSEH_end_bn_gather5: -.cfi_endproc +.cfi_endproc .size bn_gather5,.-bn_gather5 .align 64 .Linc: diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/buildinf.h b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/buildinf.h index 8a833f666c0da9..eb25b333c1eb0d 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/buildinf.h +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: solaris64-x86_64-gcc" -#define DATE "built on: Mon Mar 23 18:00:19 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:10 2020 UTC" /* * Generate compiler_flags as an array of individual characters. 
This is a diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/camellia/cmll-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/camellia/cmll-x86_64.s index eeb20dd2291da7..92056f8b1e0e0e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/camellia/cmll-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/camellia/cmll-x86_64.s @@ -5,13 +5,13 @@ .type Camellia_EncryptBlock,@function .align 16 Camellia_EncryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Lenc_rounds -.cfi_endproc +.cfi_endproc .size Camellia_EncryptBlock,.-Camellia_EncryptBlock .globl Camellia_EncryptBlock_Rounds @@ -85,7 +85,7 @@ Camellia_EncryptBlock_Rounds: .type _x86_64_Camellia_encrypt,@function .align 16 _x86_64_Camellia_encrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -288,7 +288,7 @@ _x86_64_Camellia_encrypt: movl %edx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt @@ -296,13 +296,13 @@ _x86_64_Camellia_encrypt: .type Camellia_DecryptBlock,@function .align 16 Camellia_DecryptBlock: -.cfi_startproc +.cfi_startproc movl $128,%eax subl %edi,%eax movl $3,%edi adcl $0,%edi jmp .Ldec_rounds -.cfi_endproc +.cfi_endproc .size Camellia_DecryptBlock,.-Camellia_DecryptBlock .globl Camellia_DecryptBlock_Rounds @@ -376,7 +376,7 @@ Camellia_DecryptBlock_Rounds: .type _x86_64_Camellia_decrypt,@function .align 16 _x86_64_Camellia_decrypt: -.cfi_startproc +.cfi_startproc xorl 0(%r14),%r9d xorl 4(%r14),%r8d xorl 8(%r14),%r11d @@ -580,7 +580,7 @@ _x86_64_Camellia_decrypt: movl %ebx,%r11d .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt .globl Camellia_Ekeygen .type Camellia_Ekeygen,@function diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s index 5c9e4050416212..80569cae04667e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/ecp_nistz256-x86_64.s @@ -3874,12 +3874,12 @@ ecp_nistz256_ord_sqr_montx: .type ecp_nistz256_to_mont,@function .align 32 ecp_nistz256_to_mont: -.cfi_startproc +.cfi_startproc movl $0x80100,%ecx andl OPENSSL_ia32cap_P+8(%rip),%ecx leaq .LRR(%rip),%rdx jmp .Lmul_mont -.cfi_endproc +.cfi_endproc .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont @@ -4823,7 +4823,7 @@ ecp_nistz256_from_mont: .type ecp_nistz256_scatter_w5,@function .align 32 ecp_nistz256_scatter_w5: -.cfi_startproc +.cfi_startproc leal -3(%rdx,%rdx,2),%edx movdqa 0(%rsi),%xmm0 shll $5,%edx @@ -4840,7 +4840,7 @@ ecp_nistz256_scatter_w5: movdqa %xmm5,80(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5 @@ -4914,7 +4914,7 @@ ecp_nistz256_gather_w5: .type ecp_nistz256_scatter_w7,@function .align 32 ecp_nistz256_scatter_w7: -.cfi_startproc +.cfi_startproc movdqu 0(%rsi),%xmm0 shll $6,%edx movdqu 16(%rsi),%xmm1 @@ -4926,7 +4926,7 @@ ecp_nistz256_scatter_w7: movdqa %xmm3,48(%rdi,%rdx,1) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/x25519-x86_64.s 
b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/x25519-x86_64.s index 1788e568cda5d2..8fd319c83c880d 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/x25519-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/ec/x25519-x86_64.s @@ -400,14 +400,14 @@ x25519_fe51_mul121666: .type x25519_fe64_eligible,@function .align 32 x25519_fe64_eligible: -.cfi_startproc +.cfi_startproc movl OPENSSL_ia32cap_P+8(%rip),%ecx xorl %eax,%eax andl $0x80100,%ecx cmpl $0x80100,%ecx cmovel %ecx,%eax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_eligible,.-x25519_fe64_eligible .globl x25519_fe64_mul @@ -650,7 +650,7 @@ x25519_fe64_sqr: .align 32 x25519_fe64_mul121666: .Lfe64_mul121666_body: -.cfi_startproc +.cfi_startproc movl $121666,%edx mulxq 0(%rsi),%r8,%rcx mulxq 8(%rsi),%r9,%rax @@ -679,7 +679,7 @@ x25519_fe64_mul121666: .Lfe64_mul121666_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_mul121666,.-x25519_fe64_mul121666 .globl x25519_fe64_add @@ -687,7 +687,7 @@ x25519_fe64_mul121666: .align 32 x25519_fe64_add: .Lfe64_add_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -716,7 +716,7 @@ x25519_fe64_add: .Lfe64_add_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_add,.-x25519_fe64_add .globl x25519_fe64_sub @@ -724,7 +724,7 @@ x25519_fe64_add: .align 32 x25519_fe64_sub: .Lfe64_sub_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -753,7 +753,7 @@ x25519_fe64_sub: .Lfe64_sub_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_sub,.-x25519_fe64_sub .globl x25519_fe64_tobytes @@ -761,7 +761,7 @@ x25519_fe64_sub: .align 32 x25519_fe64_tobytes: .Lfe64_to_body: -.cfi_startproc +.cfi_startproc movq 0(%rsi),%r8 movq 8(%rsi),%r9 movq 16(%rsi),%r10 @@ -797,6 +797,6 @@ x25519_fe64_tobytes: .Lfe64_to_epilogue: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size x25519_fe64_tobytes,.-x25519_fe64_tobytes .byte 88,50,53,53,49,57,32,112,114,105,109,105,116,105,118,101,115,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/modes/aesni-gcm-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/modes/aesni-gcm-x86_64.s index 01d89630a42f73..bf508aff6ff6ec 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/modes/aesni-gcm-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/modes/aesni-gcm-x86_64.s @@ -3,7 +3,7 @@ .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: -.cfi_startproc +.cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 @@ -311,7 +311,7 @@ _aesni_ctr32_ghash_6x: vpxor %xmm4,%xmm8,%xmm8 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .type aesni_gcm_decrypt,@function @@ -418,7 +418,7 @@ aesni_gcm_decrypt: .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: -.cfi_startproc +.cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%rbp),%r13 @@ -505,7 +505,7 @@ _aesni_ctr32_6x: vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 -.cfi_endproc +.cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt diff --git 
a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/poly1305/poly1305-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/poly1305/poly1305-x86_64.s index 2636c52bbe5e48..8f2554e047f1bf 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/poly1305/poly1305-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/poly1305/poly1305-x86_64.s @@ -12,7 +12,7 @@ .type poly1305_init,@function .align 32 poly1305_init: -.cfi_startproc +.cfi_startproc xorq %rax,%rax movq %rax,0(%rdi) movq %rax,8(%rdi) @@ -43,7 +43,7 @@ poly1305_init: movl $1,%eax .Lno_key: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_init,.-poly1305_init .type poly1305_blocks,@function @@ -164,7 +164,7 @@ poly1305_blocks: .type poly1305_emit,@function .align 32 poly1305_emit: -.cfi_startproc +.cfi_startproc .Lemit: movq 0(%rdi),%r8 movq 8(%rdi),%r9 @@ -185,12 +185,12 @@ poly1305_emit: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit,.-poly1305_emit .type __poly1305_block,@function .align 32 __poly1305_block: -.cfi_startproc +.cfi_startproc mulq %r14 movq %rax,%r9 movq %r11,%rax @@ -230,13 +230,13 @@ __poly1305_block: adcq $0,%rbx adcq $0,%rbp .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_block,.-__poly1305_block .type __poly1305_init_avx,@function .align 32 __poly1305_init_avx: -.cfi_startproc +.cfi_startproc movq %r11,%r14 movq %r12,%rbx xorq %rbp,%rbp @@ -394,7 +394,7 @@ __poly1305_init_avx: leaq -48-64(%rdi),%rdi .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __poly1305_init_avx,.-__poly1305_init_avx .type poly1305_blocks_avx,@function @@ -1235,7 +1235,7 @@ poly1305_blocks_avx: .type poly1305_emit_avx,@function .align 32 poly1305_emit_avx: -.cfi_startproc +.cfi_startproc cmpl $0,20(%rdi) je .Lemit @@ -1286,7 +1286,7 @@ poly1305_emit_avx: movq %rcx,8(%rsi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size poly1305_emit_avx,.-poly1305_emit_avx .type poly1305_blocks_avx2,@function .align 32 @@ -1969,7 +1969,7 @@ poly1305_blocks_avx2: .type xor128_encrypt_n_pad,@function .align 16 xor128_encrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -2011,14 +2011,14 @@ xor128_encrypt_n_pad: .Ldone_enc: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad .globl xor128_decrypt_n_pad .type xor128_decrypt_n_pad,@function .align 16 xor128_decrypt_n_pad: -.cfi_startproc +.cfi_startproc subq %rdx,%rsi subq %rdx,%rdi movq %rcx,%r10 @@ -2064,5 +2064,5 @@ xor128_decrypt_n_pad: .Ldone_dec: movq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/rc4/rc4-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/rc4/rc4-x86_64.s index b97c757550aad0..d1d1eece70bf1e 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/rc4/rc4-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/rc4/rc4-x86_64.s @@ -5,7 +5,7 @@ .type RC4,@function .align 16 RC4: -.cfi_startproc +.cfi_startproc orq %rsi,%rsi jne .Lentry .byte 0xf3,0xc3 @@ -534,7 +534,7 @@ RC4: .type RC4_set_key,@function .align 16 RC4_set_key: -.cfi_startproc +.cfi_startproc leaq 8(%rdi),%rdi leaq (%rdx,%rsi,1),%rdx negq %rsi @@ -601,14 +601,14 @@ RC4_set_key: movl %eax,-8(%rdi) movl %eax,-4(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size RC4_set_key,.-RC4_set_key .globl 
RC4_options .type RC4_options,@function .align 16 RC4_options: -.cfi_startproc +.cfi_startproc leaq .Lopts(%rip),%rax movl OPENSSL_ia32cap_P(%rip),%edx btl $20,%edx @@ -621,7 +621,7 @@ RC4_options: addq $12,%rax .Ldone: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .align 64 .Lopts: .byte 114,99,52,40,56,120,44,105,110,116,41,0 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/keccak1600-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/keccak1600-x86_64.s index 09617d014bdb7b..11f26e933d80b5 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/keccak1600-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/keccak1600-x86_64.s @@ -3,7 +3,7 @@ .type __KeccakF1600,@function .align 32 __KeccakF1600: -.cfi_startproc +.cfi_startproc movq 60(%rdi),%rax movq 68(%rdi),%rbx movq 76(%rdi),%rcx @@ -256,7 +256,7 @@ __KeccakF1600: leaq -192(%r15),%r15 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size __KeccakF1600,.-__KeccakF1600 .type KeccakF1600,@function diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha1-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha1-x86_64.s index 98541727e555da..d4efc7206f57b3 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha1-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha1-x86_64.s @@ -1422,7 +1422,7 @@ _shaext_shortcut: movdqu %xmm0,(%rdi) movd %xmm1,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext .type sha1_block_data_order_ssse3,@function .align 16 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha256-x86_64.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha256-x86_64.s index 9357385da3c49b..a7b60900fdd061 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha256-x86_64.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/sha/sha256-x86_64.s @@ -1775,7 +1775,7 @@ K256: .align 64 sha256_block_data_order_shaext: _shaext_shortcut: -.cfi_startproc +.cfi_startproc leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 @@ -1978,7 +1978,7 @@ _shaext_shortcut: movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext .type sha256_block_data_order_ssse3,@function .align 64 diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/x86_64cpuid.s b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/x86_64cpuid.s index 9268ce8c9a9d63..748e6d161fa24a 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/x86_64cpuid.s +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/asm_avx2/crypto/x86_64cpuid.s @@ -12,7 +12,7 @@ .type OPENSSL_atomic_add,@function .align 16 OPENSSL_atomic_add: -.cfi_startproc +.cfi_startproc movl (%rdi),%eax .Lspin: leaq (%rsi,%rax,1),%r8 .byte 0xf0 @@ -21,19 +21,19 @@ OPENSSL_atomic_add: movl %r8d,%eax .byte 0x48,0x98 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_atomic_add,.-OPENSSL_atomic_add .globl OPENSSL_rdtsc .type OPENSSL_rdtsc,@function .align 16 OPENSSL_rdtsc: -.cfi_startproc +.cfi_startproc rdtsc shlq $32,%rdx orq %rdx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_rdtsc,.-OPENSSL_rdtsc .globl OPENSSL_ia32_cpuid @@ -209,7 +209,7 @@ OPENSSL_ia32_cpuid: 
.type OPENSSL_cleanse,@function .align 16 OPENSSL_cleanse: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $15,%rsi jae .Lot @@ -239,14 +239,14 @@ OPENSSL_cleanse: cmpq $0,%rsi jne .Little .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_cleanse,.-OPENSSL_cleanse .globl CRYPTO_memcmp .type CRYPTO_memcmp,@function .align 16 CRYPTO_memcmp: -.cfi_startproc +.cfi_startproc xorq %rax,%rax xorq %r10,%r10 cmpq $0,%rdx @@ -275,13 +275,13 @@ CRYPTO_memcmp: shrq $63,%rax .Lno_data: .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size CRYPTO_memcmp,.-CRYPTO_memcmp .globl OPENSSL_wipe_cpu .type OPENSSL_wipe_cpu,@function .align 16 OPENSSL_wipe_cpu: -.cfi_startproc +.cfi_startproc pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 @@ -308,13 +308,13 @@ OPENSSL_wipe_cpu: xorq %r11,%r11 leaq 8(%rsp),%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu .globl OPENSSL_instrument_bus .type OPENSSL_instrument_bus,@function .align 16 OPENSSL_instrument_bus: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rsi,%r11 @@ -341,14 +341,14 @@ OPENSSL_instrument_bus: movq %r11,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus,.-OPENSSL_instrument_bus .globl OPENSSL_instrument_bus2 .type OPENSSL_instrument_bus2,@function .align 16 OPENSSL_instrument_bus2: -.cfi_startproc +.cfi_startproc movq %rdi,%r10 movq %rsi,%rcx movq %rdx,%r11 @@ -391,13 +391,13 @@ OPENSSL_instrument_bus2: movq 8(%rsp),%rax subq %rcx,%rax .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_instrument_bus2,.-OPENSSL_instrument_bus2 .globl OPENSSL_ia32_rdrand_bytes .type OPENSSL_ia32_rdrand_bytes,@function .align 16 OPENSSL_ia32_rdrand_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdrand_bytes @@ -434,13 +434,13 @@ OPENSSL_ia32_rdrand_bytes: .Ldone_rdrand_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdrand_bytes,.-OPENSSL_ia32_rdrand_bytes .globl OPENSSL_ia32_rdseed_bytes .type OPENSSL_ia32_rdseed_bytes,@function .align 16 OPENSSL_ia32_rdseed_bytes: -.cfi_startproc +.cfi_startproc xorq %rax,%rax cmpq $0,%rsi je .Ldone_rdseed_bytes @@ -477,5 +477,5 @@ OPENSSL_ia32_rdseed_bytes: .Ldone_rdseed_bytes: xorq %r10,%r10 .byte 0xf3,0xc3 -.cfi_endproc +.cfi_endproc .size OPENSSL_ia32_rdseed_bytes,.-OPENSSL_ia32_rdseed_bytes diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/configdata.pm b/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/configdata.pm index c4415b487e4e90..e898532a6d069c 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/configdata.pm +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/configdata.pm @@ -110,8 +110,8 @@ our %config = ( sourcedir => ".", target => "solaris64-x86_64-gcc", tdirs => [ "ossl_shim" ], - version => "1.1.1e", - version_num => "0x1010105fL", + version => "1.1.1f", + version_num => "0x1010106fL", ); our %target = ( diff --git a/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/crypto/buildinf.h b/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/crypto/buildinf.h index 87a94aa6b95807..1e3821b04f9c81 100644 --- a/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/crypto/buildinf.h +++ b/deps/openssl/config/archs/solaris64-x86_64-gcc/no-asm/crypto/buildinf.h @@ -11,7 +11,7 @@ */ #define PLATFORM "platform: solaris64-x86_64-gcc" -#define DATE "built on: Mon Mar 23 18:00:43 2020 UTC" +#define DATE "built on: Wed Apr 1 16:09:19 2020 UTC" /* * Generate compiler_flags as an array of individual 
characters. This is a diff --git a/deps/openssl/openssl/CHANGES b/deps/openssl/openssl/CHANGES index 0250e4ef026bf7..f4230aaac03185 100644 --- a/deps/openssl/openssl/CHANGES +++ b/deps/openssl/openssl/CHANGES @@ -7,6 +7,24 @@ https://github.com/openssl/openssl/commits/ and pick the appropriate release branch. + Changes between 1.1.1e and 1.1.1f [31 Mar 2020] + + *) Revert the change of EOF detection while reading in libssl to avoid + regressions in applications depending on the current way of reporting + the EOF. As the existing method is not fully accurate the change to + reporting the EOF via SSL_ERROR_SSL is kept on the current development + branch and will be present in the 3.0 release. + [Tomas Mraz] + + *) Revised BN_generate_prime_ex to not avoid factors 3..17863 in p-1 + when primes for RSA keys are computed. + Since we previously always generated primes == 2 (mod 3) for RSA keys, + the 2-prime and 3-prime RSA modules were easy to distinguish, since + N = p*q = 1 (mod 3), but N = p*q*r = 2 (mod 3). Therefore fingerprinting + 2-prime vs. 3-prime RSA keys was possible by computing N mod 3. + This avoids possible fingerprinting of newly generated RSA modules. + [Bernd Edlinger] + Changes between 1.1.1d and 1.1.1e [17 Mar 2020] *) Properly detect EOF while reading in libssl. Previously if we hit an EOF while reading in libssl then we would report an error back to the diff --git a/deps/openssl/openssl/Configurations/unix-Makefile.tmpl b/deps/openssl/openssl/Configurations/unix-Makefile.tmpl index 9bf54f21277fcb..3a24d551359bd0 100644 --- a/deps/openssl/openssl/Configurations/unix-Makefile.tmpl +++ b/deps/openssl/openssl/Configurations/unix-Makefile.tmpl @@ -547,78 +547,78 @@ uninstall_sw: uninstall_runtime uninstall_engines uninstall_dev install_docs: install_man_docs install_html_docs uninstall_docs: uninstall_man_docs uninstall_html_docs - $(RM) -r $(DESTDIR)$(DOCDIR) + $(RM) -r "$(DESTDIR)$(DOCDIR)" install_ssldirs: - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(OPENSSLDIR)/certs - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(OPENSSLDIR)/private - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(OPENSSLDIR)/misc + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(OPENSSLDIR)/certs" + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(OPENSSLDIR)/private" + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(OPENSSLDIR)/misc" @set -e; for x in dummy $(MISC_SCRIPTS); do \ if [ "$$x" = "dummy" ]; then continue; fi; \ x1=`echo "$$x" | cut -f1 -d:`; \ x2=`echo "$$x" | cut -f2 -d:`; \ fn=`basename $$x1`; \ $(ECHO) "install $$x1 -> $(DESTDIR)$(OPENSSLDIR)/misc/$$fn"; \ - cp $$x1 $(DESTDIR)$(OPENSSLDIR)/misc/$$fn.new; \ - chmod 755 $(DESTDIR)$(OPENSSLDIR)/misc/$$fn.new; \ - mv -f $(DESTDIR)$(OPENSSLDIR)/misc/$$fn.new \ - $(DESTDIR)$(OPENSSLDIR)/misc/$$fn; \ + cp $$x1 "$(DESTDIR)$(OPENSSLDIR)/misc/$$fn.new"; \ + chmod 755 "$(DESTDIR)$(OPENSSLDIR)/misc/$$fn.new"; \ + mv -f "$(DESTDIR)$(OPENSSLDIR)/misc/$$fn.new" \ + "$(DESTDIR)$(OPENSSLDIR)/misc/$$fn"; \ if [ "$$x1" != "$$x2" ]; then \ ln=`basename "$$x2"`; \ : {- output_off() unless windowsdll(); "" -}; \ $(ECHO) "copy $(DESTDIR)$(OPENSSLDIR)/misc/$$ln -> $(DESTDIR)$(OPENSSLDIR)/misc/$$fn"; \ - cp $(DESTDIR)$(OPENSSLDIR)/misc/$$fn $(DESTDIR)$(OPENSSLDIR)/misc/$$ln; \ + cp "$(DESTDIR)$(OPENSSLDIR)/misc/$$fn" "$(DESTDIR)$(OPENSSLDIR)/misc/$$ln"; \ : {- output_on() unless windowsdll(); output_off() if windowsdll(); "" -}; \ $(ECHO) "link $(DESTDIR)$(OPENSSLDIR)/misc/$$ln -> $(DESTDIR)$(OPENSSLDIR)/misc/$$fn"; \ - ln -sf $$fn 
$(DESTDIR)$(OPENSSLDIR)/misc/$$ln; \ + ln -sf $$fn "$(DESTDIR)$(OPENSSLDIR)/misc/$$ln"; \ : {- output_on() if windowsdll(); "" -}; \ fi; \ done @$(ECHO) "install $(SRCDIR)/apps/openssl.cnf -> $(DESTDIR)$(OPENSSLDIR)/openssl.cnf.dist" - @cp $(SRCDIR)/apps/openssl.cnf $(DESTDIR)$(OPENSSLDIR)/openssl.cnf.new - @chmod 644 $(DESTDIR)$(OPENSSLDIR)/openssl.cnf.new - @mv -f $(DESTDIR)$(OPENSSLDIR)/openssl.cnf.new $(DESTDIR)$(OPENSSLDIR)/openssl.cnf.dist + @cp $(SRCDIR)/apps/openssl.cnf "$(DESTDIR)$(OPENSSLDIR)/openssl.cnf.new" + @chmod 644 "$(DESTDIR)$(OPENSSLDIR)/openssl.cnf.new" + @mv -f "$(DESTDIR)$(OPENSSLDIR)/openssl.cnf.new" "$(DESTDIR)$(OPENSSLDIR)/openssl.cnf.dist" @if [ ! -f "$(DESTDIR)$(OPENSSLDIR)/openssl.cnf" ]; then \ $(ECHO) "install $(SRCDIR)/apps/openssl.cnf -> $(DESTDIR)$(OPENSSLDIR)/openssl.cnf"; \ - cp $(SRCDIR)/apps/openssl.cnf $(DESTDIR)$(OPENSSLDIR)/openssl.cnf; \ - chmod 644 $(DESTDIR)$(OPENSSLDIR)/openssl.cnf; \ + cp $(SRCDIR)/apps/openssl.cnf "$(DESTDIR)$(OPENSSLDIR)/openssl.cnf"; \ + chmod 644 "$(DESTDIR)$(OPENSSLDIR)/openssl.cnf"; \ fi @$(ECHO) "install $(SRCDIR)/apps/ct_log_list.cnf -> $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.dist" - @cp $(SRCDIR)/apps/ct_log_list.cnf $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.new - @chmod 644 $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.new - @mv -f $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.new $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.dist + @cp $(SRCDIR)/apps/ct_log_list.cnf "$(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.new" + @chmod 644 "$(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.new" + @mv -f "$(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.new" "$(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf.dist" @if [ ! -f "$(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf" ]; then \ $(ECHO) "install $(SRCDIR)/apps/ct_log_list.cnf -> $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf"; \ - cp $(SRCDIR)/apps/ct_log_list.cnf $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf; \ - chmod 644 $(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf; \ + cp $(SRCDIR)/apps/ct_log_list.cnf "$(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf"; \ + chmod 644 "$(DESTDIR)$(OPENSSLDIR)/ct_log_list.cnf"; \ fi install_dev: install_runtime_libs @[ -n "$(INSTALLTOP)" ] || (echo INSTALLTOP should not be empty; exit 1) @$(ECHO) "*** Installing development files" - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(INSTALLTOP)/include/openssl + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(INSTALLTOP)/include/openssl" @ : {- output_off() unless grep { $_ eq "OPENSSL_USE_APPLINK" } (@{$target{defines}}, @{$config{defines}}); "" -} @$(ECHO) "install $(SRCDIR)/ms/applink.c -> $(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c" - @cp $(SRCDIR)/ms/applink.c $(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c - @chmod 644 $(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c + @cp $(SRCDIR)/ms/applink.c "$(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c" + @chmod 644 "$(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c" @ : {- output_on() unless grep { $_ eq "OPENSSL_USE_APPLINK" } (@{$target{defines}}, @{$config{defines}}); "" -} @set -e; for i in $(SRCDIR)/include/openssl/*.h \ $(BLDDIR)/include/openssl/*.h; do \ fn=`basename $$i`; \ $(ECHO) "install $$i -> $(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn"; \ - cp $$i $(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn; \ - chmod 644 $(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn; \ + cp $$i "$(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn"; \ + chmod 644 "$(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn"; \ done - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(libdir) + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(libdir)" @set -e; for l 
in $(INSTALL_LIBS); do \ fn=`basename $$l`; \ $(ECHO) "install $$l -> $(DESTDIR)$(libdir)/$$fn"; \ - cp $$l $(DESTDIR)$(libdir)/$$fn.new; \ - $(RANLIB) $(DESTDIR)$(libdir)/$$fn.new; \ - chmod 644 $(DESTDIR)$(libdir)/$$fn.new; \ - mv -f $(DESTDIR)$(libdir)/$$fn.new \ - $(DESTDIR)$(libdir)/$$fn; \ + cp $$l "$(DESTDIR)$(libdir)/$$fn.new"; \ + $(RANLIB) "$(DESTDIR)$(libdir)/$$fn.new"; \ + chmod 644 "$(DESTDIR)$(libdir)/$$fn.new"; \ + mv -f "$(DESTDIR)$(libdir)/$$fn.new" \ + "$(DESTDIR)$(libdir)/$$fn"; \ done @ : {- output_off() if $disabled{shared}; "" -} @set -e; for s in $(INSTALL_SHLIB_INFO); do \ @@ -629,61 +629,61 @@ install_dev: install_runtime_libs : {- output_off(); output_on() unless windowsdll() or sharedaix(); "" -}; \ if [ "$$fn1" != "$$fn2" ]; then \ $(ECHO) "link $(DESTDIR)$(libdir)/$$fn2 -> $(DESTDIR)$(libdir)/$$fn1"; \ - ln -sf $$fn1 $(DESTDIR)$(libdir)/$$fn2; \ + ln -sf $$fn1 "$(DESTDIR)$(libdir)/$$fn2"; \ fi; \ : {- output_off() unless windowsdll() or sharedaix(); output_on() if windowsdll(); "" -}; \ $(ECHO) "install $$s2 -> $(DESTDIR)$(libdir)/$$fn2"; \ - cp $$s2 $(DESTDIR)$(libdir)/$$fn2.new; \ - chmod 755 $(DESTDIR)$(libdir)/$$fn2.new; \ - mv -f $(DESTDIR)$(libdir)/$$fn2.new \ - $(DESTDIR)$(libdir)/$$fn2; \ + cp $$s2 "$(DESTDIR)$(libdir)/$$fn2.new"; \ + chmod 755 "$(DESTDIR)$(libdir)/$$fn2.new"; \ + mv -f "$(DESTDIR)$(libdir)/$$fn2.new" \ + "$(DESTDIR)$(libdir)/$$fn2"; \ : {- output_off() if windowsdll(); output_on() if sharedaix(); "" -}; \ - a=$(DESTDIR)$(libdir)/$$fn2; \ + a="$(DESTDIR)$(libdir)/$$fn2"; \ $(ECHO) "install $$s1 -> $$a"; \ - if [ -f $$a ]; then ( trap "rm -rf /tmp/ar.$$$$" INT 0; \ + if [ -f "$$a" ]; then ( trap "rm -rf /tmp/ar.$$$$" INT 0; \ mkdir /tmp/ar.$$$$; ( cd /tmp/ar.$$$$; \ - cp -f $$a $$a.new; \ - for so in `$(AR) t $$a`; do \ - $(AR) x $$a $$so; \ - chmod u+w $$so; \ - strip -X32_64 -e $$so; \ - $(AR) r $$a.new $$so; \ + cp -f "$$a" "$$a.new"; \ + for so in `$(AR) t "$$a"`; do \ + $(AR) x "$$a" "$$so"; \ + chmod u+w "$$so"; \ + strip -X32_64 -e "$$so"; \ + $(AR) r "$$a.new" "$$so"; \ done; \ )); fi; \ - $(AR) r $$a.new $$s1; \ - mv -f $$a.new $$a; \ + $(AR) r "$$a.new" "$$s1"; \ + mv -f "$$a.new" "$$a"; \ : {- output_off() if sharedaix(); output_on(); "" -}; \ done @ : {- output_on() if $disabled{shared}; "" -} - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(libdir)/pkgconfig + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(libdir)/pkgconfig" @$(ECHO) "install libcrypto.pc -> $(DESTDIR)$(libdir)/pkgconfig/libcrypto.pc" - @cp libcrypto.pc $(DESTDIR)$(libdir)/pkgconfig - @chmod 644 $(DESTDIR)$(libdir)/pkgconfig/libcrypto.pc + @cp libcrypto.pc "$(DESTDIR)$(libdir)/pkgconfig" + @chmod 644 "$(DESTDIR)$(libdir)/pkgconfig/libcrypto.pc" @$(ECHO) "install libssl.pc -> $(DESTDIR)$(libdir)/pkgconfig/libssl.pc" - @cp libssl.pc $(DESTDIR)$(libdir)/pkgconfig - @chmod 644 $(DESTDIR)$(libdir)/pkgconfig/libssl.pc + @cp libssl.pc "$(DESTDIR)$(libdir)/pkgconfig" + @chmod 644 "$(DESTDIR)$(libdir)/pkgconfig/libssl.pc" @$(ECHO) "install openssl.pc -> $(DESTDIR)$(libdir)/pkgconfig/openssl.pc" - @cp openssl.pc $(DESTDIR)$(libdir)/pkgconfig - @chmod 644 $(DESTDIR)$(libdir)/pkgconfig/openssl.pc + @cp openssl.pc "$(DESTDIR)$(libdir)/pkgconfig" + @chmod 644 "$(DESTDIR)$(libdir)/pkgconfig/openssl.pc" uninstall_dev: uninstall_runtime_libs @$(ECHO) "*** Uninstalling development files" @ : {- output_off() unless grep { $_ eq "OPENSSL_USE_APPLINK" } (@{$target{defines}}, @{$config{defines}}); "" -} @$(ECHO) "$(RM) $(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c" - @$(RM) 
$(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c + @$(RM) "$(DESTDIR)$(INSTALLTOP)/include/openssl/applink.c" @ : {- output_on() unless grep { $_ eq "OPENSSL_USE_APPLINK" } (@{$target{defines}}, @{$config{defines}}); "" -} @set -e; for i in $(SRCDIR)/include/openssl/*.h \ $(BLDDIR)/include/openssl/*.h; do \ fn=`basename $$i`; \ $(ECHO) "$(RM) $(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn"; \ - $(RM) $(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn; \ + $(RM) "$(DESTDIR)$(INSTALLTOP)/include/openssl/$$fn"; \ done - -$(RMDIR) $(DESTDIR)$(INSTALLTOP)/include/openssl - -$(RMDIR) $(DESTDIR)$(INSTALLTOP)/include + -$(RMDIR) "$(DESTDIR)$(INSTALLTOP)/include/openssl" + -$(RMDIR) "$(DESTDIR)$(INSTALLTOP)/include" @set -e; for l in $(INSTALL_LIBS); do \ fn=`basename $$l`; \ $(ECHO) "$(RM) $(DESTDIR)$(libdir)/$$fn"; \ - $(RM) $(DESTDIR)$(libdir)/$$fn; \ + $(RM) "$(DESTDIR)$(libdir)/$$fn"; \ done @ : {- output_off() if $disabled{shared}; "" -} @set -e; for s in $(INSTALL_SHLIB_INFO); do \ @@ -693,35 +693,35 @@ uninstall_dev: uninstall_runtime_libs fn2=`basename $$s2`; \ : {- output_off() if windowsdll(); "" -}; \ $(ECHO) "$(RM) $(DESTDIR)$(libdir)/$$fn2"; \ - $(RM) $(DESTDIR)$(libdir)/$$fn2; \ + $(RM) "$(DESTDIR)$(libdir)/$$fn2"; \ if [ "$$fn1" != "$$fn2" -a -f "$(DESTDIR)$(libdir)/$$fn1" ]; then \ $(ECHO) "$(RM) $(DESTDIR)$(libdir)/$$fn1"; \ - $(RM) $(DESTDIR)$(libdir)/$$fn1; \ + $(RM) "$(DESTDIR)$(libdir)/$$fn1"; \ fi; \ : {- output_on() if windowsdll(); "" -}{- output_off() unless windowsdll(); "" -}; \ $(ECHO) "$(RM) $(DESTDIR)$(libdir)/$$fn2"; \ - $(RM) $(DESTDIR)$(libdir)/$$fn2; \ + $(RM) "$(DESTDIR)$(libdir)/$$fn2"; \ : {- output_on() unless windowsdll(); "" -}; \ done @ : {- output_on() if $disabled{shared}; "" -} - $(RM) $(DESTDIR)$(libdir)/pkgconfig/libcrypto.pc - $(RM) $(DESTDIR)$(libdir)/pkgconfig/libssl.pc - $(RM) $(DESTDIR)$(libdir)/pkgconfig/openssl.pc - -$(RMDIR) $(DESTDIR)$(libdir)/pkgconfig - -$(RMDIR) $(DESTDIR)$(libdir) + $(RM) "$(DESTDIR)$(libdir)/pkgconfig/libcrypto.pc" + $(RM) "$(DESTDIR)$(libdir)/pkgconfig/libssl.pc" + $(RM) "$(DESTDIR)$(libdir)/pkgconfig/openssl.pc" + -$(RMDIR) "$(DESTDIR)$(libdir)/pkgconfig" + -$(RMDIR) "$(DESTDIR)$(libdir)" install_engines: install_runtime_libs build_engines @[ -n "$(INSTALLTOP)" ] || (echo INSTALLTOP should not be empty; exit 1) - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(ENGINESDIR)/ + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(ENGINESDIR)/" @$(ECHO) "*** Installing engines" @set -e; for e in dummy $(INSTALL_ENGINES); do \ if [ "$$e" = "dummy" ]; then continue; fi; \ fn=`basename $$e`; \ $(ECHO) "install $$e -> $(DESTDIR)$(ENGINESDIR)/$$fn"; \ - cp $$e $(DESTDIR)$(ENGINESDIR)/$$fn.new; \ - chmod 755 $(DESTDIR)$(ENGINESDIR)/$$fn.new; \ - mv -f $(DESTDIR)$(ENGINESDIR)/$$fn.new \ - $(DESTDIR)$(ENGINESDIR)/$$fn; \ + cp $$e "$(DESTDIR)$(ENGINESDIR)/$$fn.new"; \ + chmod 755 "$(DESTDIR)$(ENGINESDIR)/$$fn.new"; \ + mv -f "$(DESTDIR)$(ENGINESDIR)/$$fn.new" \ + "$(DESTDIR)$(ENGINESDIR)/$$fn"; \ done uninstall_engines: @@ -733,18 +733,18 @@ uninstall_engines: continue; \ fi; \ $(ECHO) "$(RM) $(DESTDIR)$(ENGINESDIR)/$$fn"; \ - $(RM) $(DESTDIR)$(ENGINESDIR)/$$fn; \ + $(RM) "$(DESTDIR)$(ENGINESDIR)/$$fn"; \ done - -$(RMDIR) $(DESTDIR)$(ENGINESDIR) + -$(RMDIR) "$(DESTDIR)$(ENGINESDIR)" install_runtime: install_programs install_runtime_libs: build_libs @[ -n "$(INSTALLTOP)" ] || (echo INSTALLTOP should not be empty; exit 1) @ : {- output_off() if windowsdll(); "" -} - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(libdir) + @$(PERL) 
$(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(libdir)" @ : {- output_on() if windowsdll(); output_off() unless windowsdll(); "" -} - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(INSTALLTOP)/bin + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(INSTALLTOP)/bin" @ : {- output_on() unless windowsdll(); "" -} @$(ECHO) "*** Installing runtime libraries" @set -e; for s in dummy $(INSTALL_SHLIBS); do \ @@ -752,40 +752,40 @@ install_runtime_libs: build_libs fn=`basename $$s`; \ : {- output_off() unless windowsdll(); "" -}; \ $(ECHO) "install $$s -> $(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ - cp $$s $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new; \ - chmod 755 $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new; \ - mv -f $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new \ - $(DESTDIR)$(INSTALLTOP)/bin/$$fn; \ + cp $$s "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new"; \ + chmod 755 "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new"; \ + mv -f "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new" \ + "$(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ : {- output_on() unless windowsdll(); "" -}{- output_off() if windowsdll(); "" -}; \ $(ECHO) "install $$s -> $(DESTDIR)$(libdir)/$$fn"; \ - cp $$s $(DESTDIR)$(libdir)/$$fn.new; \ - chmod 755 $(DESTDIR)$(libdir)/$$fn.new; \ - mv -f $(DESTDIR)$(libdir)/$$fn.new \ - $(DESTDIR)$(libdir)/$$fn; \ + cp $$s "$(DESTDIR)$(libdir)/$$fn.new"; \ + chmod 755 "$(DESTDIR)$(libdir)/$$fn.new"; \ + mv -f "$(DESTDIR)$(libdir)/$$fn.new" \ + "$(DESTDIR)$(libdir)/$$fn"; \ : {- output_on() if windowsdll(); "" -}; \ done install_programs: install_runtime_libs build_programs @[ -n "$(INSTALLTOP)" ] || (echo INSTALLTOP should not be empty; exit 1) - @$(PERL) $(SRCDIR)/util/mkdir-p.pl $(DESTDIR)$(INSTALLTOP)/bin + @$(PERL) $(SRCDIR)/util/mkdir-p.pl "$(DESTDIR)$(INSTALLTOP)/bin" @$(ECHO) "*** Installing runtime programs" @set -e; for x in dummy $(INSTALL_PROGRAMS); do \ if [ "$$x" = "dummy" ]; then continue; fi; \ fn=`basename $$x`; \ $(ECHO) "install $$x -> $(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ - cp $$x $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new; \ - chmod 755 $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new; \ - mv -f $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new \ - $(DESTDIR)$(INSTALLTOP)/bin/$$fn; \ + cp $$x "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new"; \ + chmod 755 "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new"; \ + mv -f "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new" \ + "$(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ done @set -e; for x in dummy $(BIN_SCRIPTS); do \ if [ "$$x" = "dummy" ]; then continue; fi; \ fn=`basename $$x`; \ $(ECHO) "install $$x -> $(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ - cp $$x $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new; \ - chmod 755 $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new; \ - mv -f $(DESTDIR)$(INSTALLTOP)/bin/$$fn.new \ - $(DESTDIR)$(INSTALLTOP)/bin/$$fn; \ + cp $$x "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new"; \ + chmod 755 "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new"; \ + mv -f "$(DESTDIR)$(INSTALLTOP)/bin/$$fn.new" \ + "$(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ done uninstall_runtime: uninstall_programs uninstall_runtime_libs @@ -797,16 +797,16 @@ uninstall_programs: if [ "$$x" = "dummy" ]; then continue; fi; \ fn=`basename $$x`; \ $(ECHO) "$(RM) $(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ - $(RM) $(DESTDIR)$(INSTALLTOP)/bin/$$fn; \ + $(RM) "$(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ done; @set -e; for x in dummy $(BIN_SCRIPTS); \ do \ if [ "$$x" = "dummy" ]; then continue; fi; \ fn=`basename $$x`; \ $(ECHO) "$(RM) $(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ - $(RM) $(DESTDIR)$(INSTALLTOP)/bin/$$fn; \ + $(RM) "$(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ done - -$(RMDIR) $(DESTDIR)$(INSTALLTOP)/bin + -$(RMDIR) "$(DESTDIR)$(INSTALLTOP)/bin" 
uninstall_runtime_libs: @$(ECHO) "*** Uninstalling runtime libraries" @@ -815,7 +815,7 @@ uninstall_runtime_libs: if [ "$$s" = "dummy" ]; then continue; fi; \ fn=`basename $$s`; \ $(ECHO) "$(RM) $(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ - $(RM) $(DESTDIR)$(INSTALLTOP)/bin/$$fn; \ + $(RM) "$(DESTDIR)$(INSTALLTOP)/bin/$$fn"; \ done @ : {- output_on() unless windowsdll(); "" -} @@ -824,24 +824,24 @@ install_man_docs: @[ -n "$(INSTALLTOP)" ] || (echo INSTALLTOP should not be empty; exit 1) @$(ECHO) "*** Installing manpages" $(PERL) $(SRCDIR)/util/process_docs.pl \ - --destdir=$(DESTDIR)$(MANDIR) --type=man --suffix=$(MANSUFFIX) + "--destdir=$(DESTDIR)$(MANDIR)" --type=man --suffix=$(MANSUFFIX) uninstall_man_docs: @$(ECHO) "*** Uninstalling manpages" $(PERL) $(SRCDIR)/util/process_docs.pl \ - --destdir=$(DESTDIR)$(MANDIR) --type=man --suffix=$(MANSUFFIX) \ + "--destdir=$(DESTDIR)$(MANDIR)" --type=man --suffix=$(MANSUFFIX) \ --remove install_html_docs: @[ -n "$(INSTALLTOP)" ] || (echo INSTALLTOP should not be empty; exit 1) @$(ECHO) "*** Installing HTML manpages" $(PERL) $(SRCDIR)/util/process_docs.pl \ - --destdir=$(DESTDIR)$(HTMLDIR) --type=html + "--destdir=$(DESTDIR)$(HTMLDIR)" --type=html uninstall_html_docs: @$(ECHO) "*** Uninstalling manpages" $(PERL) $(SRCDIR)/util/process_docs.pl \ - --destdir=$(DESTDIR)$(HTMLDIR) --type=html --remove + "--destdir=$(DESTDIR)$(HTMLDIR)" --type=html --remove # Developer targets (note: these are only available on Unix) ######### diff --git a/deps/openssl/openssl/NEWS b/deps/openssl/openssl/NEWS index eba6c3b6d93fc2..85470793b20996 100644 --- a/deps/openssl/openssl/NEWS +++ b/deps/openssl/openssl/NEWS @@ -5,10 +5,16 @@ This file gives a brief overview of the major changes between each OpenSSL release. For more details please read the CHANGES file. + Major changes between OpenSSL 1.1.1e and OpenSSL 1.1.1f [31 Mar 2020] + + o Revert the unexpected EOF reporting via SSL_ERROR_SSL + Major changes between OpenSSL 1.1.1d and OpenSSL 1.1.1e [17 Mar 2020] o Fixed an overflow bug in the x64_64 Montgomery squaring procedure used in exponentiation with 512-bit moduli (CVE-2019-1551) + o Properly detect unexpected EOF while reading in libssl and report + it via SSL_ERROR_SSL Major changes between OpenSSL 1.1.1c and OpenSSL 1.1.1d [10 Sep 2019] diff --git a/deps/openssl/openssl/README b/deps/openssl/openssl/README index 8e9ce75a335d74..d7d44aa3a01b13 100644 --- a/deps/openssl/openssl/README +++ b/deps/openssl/openssl/README @@ -1,7 +1,7 @@ - OpenSSL 1.1.1e 17 Mar 2020 + OpenSSL 1.1.1f 31 Mar 2020 - Copyright (c) 1998-2019 The OpenSSL Project + Copyright (c) 1998-2020 The OpenSSL Project Copyright (c) 1995-1998 Eric A. Young, Tim J. Hudson All rights reserved. diff --git a/deps/openssl/openssl/apps/rehash.c b/deps/openssl/openssl/apps/rehash.c index 2b769fbceb87ef..fc1dffe974976d 100644 --- a/deps/openssl/openssl/apps/rehash.c +++ b/deps/openssl/openssl/apps/rehash.c @@ -1,5 +1,5 @@ /* - * Copyright 2015-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2013-2014 Timo Teräs * * Licensed under the OpenSSL license (the "License"). 
You may not use @@ -274,11 +274,19 @@ static int do_file(const char *filename, const char *fullpath, enum Hash h) if (x->x509 != NULL) { type = TYPE_CERT; name = X509_get_subject_name(x->x509); - X509_digest(x->x509, evpmd, digest, NULL); + if (!X509_digest(x->x509, evpmd, digest, NULL)) { + BIO_printf(bio_err, "out of memory\n"); + ++errs; + goto end; + } } else if (x->crl != NULL) { type = TYPE_CRL; name = X509_CRL_get_issuer(x->crl); - X509_CRL_digest(x->crl, evpmd, digest, NULL); + if (!X509_CRL_digest(x->crl, evpmd, digest, NULL)) { + BIO_printf(bio_err, "out of memory\n"); + ++errs; + goto end; + } } else { ++errs; goto end; diff --git a/deps/openssl/openssl/apps/s_server.c b/deps/openssl/openssl/apps/s_server.c index 2248a432e26814..0ba75999fd286e 100644 --- a/deps/openssl/openssl/apps/s_server.c +++ b/deps/openssl/openssl/apps/s_server.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved * Copyright 2005 Nokia. All rights reserved. * @@ -1904,7 +1904,7 @@ int s_server_main(int argc, char *argv[]) BIO_printf(bio_s_out, "Setting secondary ctx parameters\n"); if (sdebug) - ssl_ctx_security_debug(ctx, sdebug); + ssl_ctx_security_debug(ctx2, sdebug); if (session_id_prefix) { if (strlen(session_id_prefix) >= 32) diff --git a/deps/openssl/openssl/crypto/bn/bn_local.h b/deps/openssl/openssl/crypto/bn/bn_local.h index 37228104c640f4..8ad69ccd3639ed 100644 --- a/deps/openssl/openssl/crypto/bn/bn_local.h +++ b/deps/openssl/openssl/crypto/bn/bn_local.h @@ -1,5 +1,5 @@ /* - * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -654,9 +654,6 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx, int *noinv); -int bn_probable_prime_dh(BIGNUM *rnd, int bits, - const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx); - static ossl_inline BIGNUM *bn_expand(BIGNUM *a, int bits) { if (bits > (INT_MAX - BN_BITS2 + 1)) diff --git a/deps/openssl/openssl/crypto/bn/bn_prime.c b/deps/openssl/openssl/crypto/bn/bn_prime.c index 6d74da26d3c702..d0cf3779fa50d8 100644 --- a/deps/openssl/openssl/crypto/bn/bn_prime.c +++ b/deps/openssl/openssl/crypto/bn/bn_prime.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy @@ -22,10 +22,12 @@ static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1, const BIGNUM *a1_odd, int k, BN_CTX *ctx, BN_MONT_CTX *mont); -static int probable_prime(BIGNUM *rnd, int bits, prime_t *mods); -static int probable_prime_dh_safe(BIGNUM *rnd, int bits, - const BIGNUM *add, const BIGNUM *rem, - BN_CTX *ctx); +static int probable_prime(BIGNUM *rnd, int bits, int safe, prime_t *mods); +static int probable_prime_dh(BIGNUM *rnd, int bits, int safe, prime_t *mods, + const BIGNUM *add, const BIGNUM *rem, + BN_CTX *ctx); + +#define square(x) ((BN_ULONG)(x) * (BN_ULONG)(x)) int BN_GENCB_call(BN_GENCB *cb, int a, int b) { @@ -87,16 +89,11 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, loop: /* make a random number and set the top and bottom bits */ if (add == NULL) { - if (!probable_prime(ret, bits, mods)) + if (!probable_prime(ret, bits, safe, mods)) goto err; } else { - if (safe) { - if (!probable_prime_dh_safe(ret, bits, add, rem, ctx)) - goto err; - } else { - if (!bn_probable_prime_dh(ret, bits, add, rem, ctx)) - goto err; - } + if (!probable_prime_dh(ret, bits, safe, mods, add, rem, ctx)) + goto err; } if (!BN_GENCB_call(cb, 0, c1++)) @@ -272,17 +269,18 @@ static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1, return 1; } -static int probable_prime(BIGNUM *rnd, int bits, prime_t *mods) +static int probable_prime(BIGNUM *rnd, int bits, int safe, prime_t *mods) { int i; BN_ULONG delta; BN_ULONG maxdelta = BN_MASK2 - primes[NUMPRIMES - 1]; - char is_single_word = bits <= BN_BITS2; again: /* TODO: Not all primes are private */ if (!BN_priv_rand(rnd, bits, BN_RAND_TOP_TWO, BN_RAND_BOTTOM_ODD)) return 0; + if (safe && !BN_set_bit(rnd, 1)) + return 0; /* we now have a random number 'rnd' to test. */ for (i = 1; i < NUMPRIMES; i++) { BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]); @@ -290,61 +288,25 @@ static int probable_prime(BIGNUM *rnd, int bits, prime_t *mods) return 0; mods[i] = (prime_t) mod; } - /* - * If bits is so small that it fits into a single word then we - * additionally don't want to exceed that many bits. - */ - if (is_single_word) { - BN_ULONG size_limit; - - if (bits == BN_BITS2) { - /* - * Shifting by this much has undefined behaviour so we do it a - * different way - */ - size_limit = ~((BN_ULONG)0) - BN_get_word(rnd); - } else { - size_limit = (((BN_ULONG)1) << bits) - BN_get_word(rnd) - 1; - } - if (size_limit < maxdelta) - maxdelta = size_limit; - } delta = 0; loop: - if (is_single_word) { - BN_ULONG rnd_word = BN_get_word(rnd); - - /*- - * In the case that the candidate prime is a single word then - * we check that: - * 1) It's greater than primes[i] because we shouldn't reject - * 3 as being a prime number because it's a multiple of - * three. - * 2) That it's not a multiple of a known prime. We don't - * check that rnd-1 is also coprime to all the known - * primes because there aren't many small primes where - * that's true. 
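As a reader aid — not part of this patch — here is a minimal standalone C sketch of the mod-3 fingerprint described in the CHANGES entry above. The toy primes are chosen so that p == q == r == 2 (mod 3), matching what the old generator always produced for RSA:

    /*
     * Toy illustration only; plain C, not the OpenSSL BN API.
     * If every prime factor is == 2 (mod 3), a 2-prime modulus is
     * == 1 (mod 3) while a 3-prime modulus is == 2 (mod 3), so
     * N mod 3 leaks how many prime factors N has.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long p = 11, q = 23, r = 29; /* all == 2 (mod 3) */

        printf("p*q   mod 3 = %llu\n", (p * q) % 3);     /* prints 1 */
        printf("p*q*r mod 3 = %llu\n", (p * q * r) % 3); /* prints 2 */
        return 0;
    }

The revised loop that follows removes this bias: a non-safe candidate is now skipped only when it is divisible by a small prime ((mods[i] + delta) % primes[i] == 0), rather than also when rnd == 1 (mod primes[i]), a check now reserved for safe primes. Generated primes — and hence N mod 3 — are therefore no longer skewed.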
+ for (i = 1; i < NUMPRIMES; i++) { + /* + * check that rnd is a prime and also that + * gcd(rnd-1,primes) == 1 (except for 2) + * do the second check only if we are interested in safe primes + * in the case that the candidate prime is a single word then + * we check only the primes up to sqrt(rnd) */ - for (i = 1; i < NUMPRIMES && primes[i] < rnd_word; i++) { - if ((mods[i] + delta) % primes[i] == 0) { - delta += 2; - if (delta > maxdelta) - goto again; - goto loop; - } - } - } else { - for (i = 1; i < NUMPRIMES; i++) { - /* - * check that rnd is not a prime and also that gcd(rnd-1,primes) - * == 1 (except for 2) - */ - if (((mods[i] + delta) % primes[i]) <= 1) { - delta += 2; - if (delta > maxdelta) - goto again; - goto loop; - } + if (bits <= 31 && delta <= 0x7fffffff + && square(primes[i]) > BN_get_word(rnd) + delta) + break; + if (safe ? (mods[i] + delta) % primes[i] <= 1 + : (mods[i] + delta) % primes[i] == 0) { + delta += safe ? 4 : 2; + if (delta > maxdelta) + goto again; + goto loop; } } if (!BN_add_word(rnd, delta)) @@ -355,16 +317,23 @@ static int probable_prime(BIGNUM *rnd, int bits, prime_t *mods) return 1; } -int bn_probable_prime_dh(BIGNUM *rnd, int bits, - const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx) +static int probable_prime_dh(BIGNUM *rnd, int bits, int safe, prime_t *mods, + const BIGNUM *add, const BIGNUM *rem, + BN_CTX *ctx) { int i, ret = 0; BIGNUM *t1; + BN_ULONG delta; + BN_ULONG maxdelta = BN_MASK2 - primes[NUMPRIMES - 1]; BN_CTX_start(ctx); if ((t1 = BN_CTX_get(ctx)) == NULL) goto err; + if (maxdelta > BN_MASK2 - BN_get_word(add)) + maxdelta = BN_MASK2 - BN_get_word(add); + + again: if (!BN_rand(rnd, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD)) goto err; @@ -375,98 +344,48 @@ int bn_probable_prime_dh(BIGNUM *rnd, int bits, if (!BN_sub(rnd, rnd, t1)) goto err; if (rem == NULL) { - if (!BN_add_word(rnd, 1)) + if (!BN_add_word(rnd, safe ? 3u : 1u)) goto err; } else { if (!BN_add(rnd, rnd, rem)) goto err; } - /* we now have a random number 'rand' to test. */ + if (BN_num_bits(rnd) < bits + || BN_get_word(rnd) < (safe ? 5u : 3u)) { + if (!BN_add(rnd, rnd, add)) + goto err; + } - loop: + /* we now have a random number 'rnd' to test. */ for (i = 1; i < NUMPRIMES; i++) { - /* check that rnd is a prime */ BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]); if (mod == (BN_ULONG)-1) goto err; - if (mod <= 1) { - if (!BN_add(rnd, rnd, add)) - goto err; - goto loop; - } - } - ret = 1; - - err: - BN_CTX_end(ctx); - bn_check_top(rnd); - return ret; -} - -static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, - const BIGNUM *rem, BN_CTX *ctx) -{ - int i, ret = 0; - BIGNUM *t1, *qadd, *q; - - bits--; - BN_CTX_start(ctx); - t1 = BN_CTX_get(ctx); - q = BN_CTX_get(ctx); - qadd = BN_CTX_get(ctx); - if (qadd == NULL) - goto err; - - if (!BN_rshift1(qadd, padd)) - goto err; - - if (!BN_rand(q, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD)) - goto err; - - /* we need ((rnd-rem) % add) == 0 */ - if (!BN_mod(t1, q, qadd, ctx)) - goto err; - if (!BN_sub(q, q, t1)) - goto err; - if (rem == NULL) { - if (!BN_add_word(q, 1)) - goto err; - } else { - if (!BN_rshift1(t1, rem)) - goto err; - if (!BN_add(q, q, t1)) - goto err; + mods[i] = (prime_t) mod; } - - /* we now have a random number 'rand' to test. 
*/ - if (!BN_lshift1(p, q)) - goto err; - if (!BN_add_word(p, 1)) - goto err; - + delta = 0; loop: for (i = 1; i < NUMPRIMES; i++) { - /* check that p and q are prime */ - /* - * check that for p and q gcd(p-1,primes) == 1 (except for 2) - */ - BN_ULONG pmod = BN_mod_word(p, (BN_ULONG)primes[i]); - BN_ULONG qmod = BN_mod_word(q, (BN_ULONG)primes[i]); - if (pmod == (BN_ULONG)-1 || qmod == (BN_ULONG)-1) - goto err; - if (pmod == 0 || qmod == 0) { - if (!BN_add(p, p, padd)) - goto err; - if (!BN_add(q, q, qadd)) - goto err; + /* check that rnd is a prime */ + if (bits <= 31 && delta <= 0x7fffffff + && square(primes[i]) > BN_get_word(rnd) + delta) + break; + /* rnd mod p == 1 implies q = (rnd-1)/2 is divisible by p */ + if (safe ? (mods[i] + delta) % primes[i] <= 1 + : (mods[i] + delta) % primes[i] == 0) { + delta += BN_get_word(add); + if (delta > maxdelta) + goto again; goto loop; } } + if (!BN_add_word(rnd, delta)) + goto err; ret = 1; err: BN_CTX_end(ctx); - bn_check_top(p); + bn_check_top(rnd); return ret; } diff --git a/deps/openssl/openssl/crypto/conf/conf_lib.c b/deps/openssl/openssl/crypto/conf/conf_lib.c index 0b7dd26d63b0a0..add1dfa1c18132 100644 --- a/deps/openssl/openssl/crypto/conf/conf_lib.c +++ b/deps/openssl/openssl/crypto/conf/conf_lib.c @@ -1,5 +1,5 @@ /* - * Copyright 2000-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2000-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -356,8 +356,10 @@ OPENSSL_INIT_SETTINGS *OPENSSL_INIT_new(void) { OPENSSL_INIT_SETTINGS *ret = malloc(sizeof(*ret)); - if (ret != NULL) - memset(ret, 0, sizeof(*ret)); + if (ret == NULL) + return NULL; + + memset(ret, 0, sizeof(*ret)); ret->flags = DEFAULT_CONF_MFLAGS; return ret; diff --git a/deps/openssl/openssl/crypto/err/openssl.txt b/deps/openssl/openssl/crypto/err/openssl.txt index f5324c6819d8e2..35512f9caf96b0 100644 --- a/deps/openssl/openssl/crypto/err/openssl.txt +++ b/deps/openssl/openssl/crypto/err/openssl.txt @@ -2852,7 +2852,6 @@ SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES:242:unable to load ssl3 md5 routines SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES:243:unable to load ssl3 sha1 routines SSL_R_UNEXPECTED_CCS_MESSAGE:262:unexpected ccs message SSL_R_UNEXPECTED_END_OF_EARLY_DATA:178:unexpected end of early data -SSL_R_UNEXPECTED_EOF_WHILE_READING:294:unexpected eof while reading SSL_R_UNEXPECTED_MESSAGE:244:unexpected message SSL_R_UNEXPECTED_RECORD:245:unexpected record SSL_R_UNINITIALIZED:276:uninitialized diff --git a/deps/openssl/openssl/crypto/evp/e_aes.c b/deps/openssl/openssl/crypto/evp/e_aes.c index 4ebceeb95bb548..405ddbf9bf09e0 100644 --- a/deps/openssl/openssl/crypto/evp/e_aes.c +++ b/deps/openssl/openssl/crypto/evp/e_aes.c @@ -393,7 +393,7 @@ static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, /* * Verify that the two keys are different. - * + * * This addresses Rogaway's vulnerability. * See comment in aes_xts_init_key() below. */ @@ -813,7 +813,7 @@ static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, /* * Verify that the two keys are different. - * + * * This addresses Rogaway's vulnerability. * See comment in aes_xts_init_key() below. 
*/ diff --git a/deps/openssl/openssl/crypto/ex_data.c b/deps/openssl/openssl/crypto/ex_data.c index 22f3b70edf14e3..0f5a9295056abe 100644 --- a/deps/openssl/openssl/crypto/ex_data.c +++ b/deps/openssl/openssl/crypto/ex_data.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -235,7 +235,7 @@ int CRYPTO_new_ex_data(int class_index, void *obj, CRYPTO_EX_DATA *ad) return 0; } for (i = 0; i < mx; i++) { - if (storage[i] && storage[i]->new_func) { + if (storage[i] != NULL && storage[i]->new_func != NULL) { ptr = CRYPTO_get_ex_data(ad, i); storage[i]->new_func(obj, ptr, ad, i, storage[i]->argl, storage[i]->argp); @@ -299,7 +299,7 @@ int CRYPTO_dup_ex_data(int class_index, CRYPTO_EX_DATA *to, for (i = 0; i < mx; i++) { ptr = CRYPTO_get_ex_data(from, i); - if (storage[i] && storage[i]->dup_func) + if (storage[i] != NULL && storage[i]->dup_func != NULL) if (!storage[i]->dup_func(to, from, &ptr, i, storage[i]->argl, storage[i]->argp)) goto err; diff --git a/deps/openssl/openssl/crypto/pkcs12/p12_crt.c b/deps/openssl/openssl/crypto/pkcs12/p12_crt.c index d43dc3b30cf3a6..bfcae3f697e950 100644 --- a/deps/openssl/openssl/crypto/pkcs12/p12_crt.c +++ b/deps/openssl/openssl/crypto/pkcs12/p12_crt.c @@ -1,5 +1,5 @@ /* - * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -62,7 +62,8 @@ PKCS12 *PKCS12_create(const char *pass, const char *name, EVP_PKEY *pkey, X509 * if (pkey && cert) { if (!X509_check_private_key(cert, pkey)) return NULL; - X509_digest(cert, EVP_sha1(), keyid, &keyidlen); + if (!X509_digest(cert, EVP_sha1(), keyid, &keyidlen)) + return NULL; } if (cert) { diff --git a/deps/openssl/openssl/crypto/ts/ts_rsp_sign.c b/deps/openssl/openssl/crypto/ts/ts_rsp_sign.c index a584ae5f5edde0..041a187da68c6a 100644 --- a/deps/openssl/openssl/crypto/ts/ts_rsp_sign.c +++ b/deps/openssl/openssl/crypto/ts/ts_rsp_sign.c @@ -1,5 +1,5 @@ /* - * Copyright 2006-2018 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2006-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -771,7 +771,8 @@ static ESS_CERT_ID *ess_CERT_ID_new_init(X509 *cert, int issuer_needed) X509_check_purpose(cert, -1, 0); if ((cid = ESS_CERT_ID_new()) == NULL) goto err; - X509_digest(cert, EVP_sha1(), cert_sha1, NULL); + if (!X509_digest(cert, EVP_sha1(), cert_sha1, NULL)) + goto err; if (!ASN1_OCTET_STRING_set(cid->hash, cert_sha1, SHA_DIGEST_LENGTH)) goto err; diff --git a/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c b/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c index 086021247c01ca..c2e7abd67f5096 100644 --- a/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c +++ b/deps/openssl/openssl/crypto/ts/ts_rsp_verify.c @@ -1,5 +1,5 @@ /* - * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2006-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). 
You may not use * this file except in compliance with the License. You can obtain a copy @@ -289,11 +289,12 @@ static int ts_find_cert(STACK_OF(ESS_CERT_ID) *cert_ids, X509 *cert) if (!cert_ids || !cert) return -1; - X509_digest(cert, EVP_sha1(), cert_sha1, NULL); - /* Recompute SHA1 hash of certificate if necessary (side effect). */ X509_check_purpose(cert, -1, 0); + if (!X509_digest(cert, EVP_sha1(), cert_sha1, NULL)) + return -1; + /* Look for cert in the cert_ids vector. */ for (i = 0; i < sk_ESS_CERT_ID_num(cert_ids); ++i) { ESS_CERT_ID *cid = sk_ESS_CERT_ID_value(cert_ids, i); @@ -326,7 +327,8 @@ static int ts_find_cert_v2(STACK_OF(ESS_CERT_ID_V2) *cert_ids, X509 *cert) else md = EVP_sha256(); - X509_digest(cert, md, cert_digest, &len); + if (!X509_digest(cert, md, cert_digest, &len)) + return -1; if (cid->hash->length != (int)len) return -1; diff --git a/deps/openssl/openssl/crypto/x509/x509_cmp.c b/deps/openssl/openssl/crypto/x509/x509_cmp.c index e06489c3347b94..d1600e1e8ddab4 100644 --- a/deps/openssl/openssl/crypto/x509/x509_cmp.c +++ b/deps/openssl/openssl/crypto/x509/x509_cmp.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -134,9 +134,12 @@ unsigned long X509_subject_name_hash_old(X509 *x) int X509_cmp(const X509 *a, const X509 *b) { int rv; + /* ensure hash is valid */ - X509_check_purpose((X509 *)a, -1, 0); - X509_check_purpose((X509 *)b, -1, 0); + if (X509_check_purpose((X509 *)a, -1, 0) != 1) + return -2; + if (X509_check_purpose((X509 *)b, -1, 0) != 1) + return -2; rv = memcmp(a->sha1_hash, b->sha1_hash, SHA_DIGEST_LENGTH); if (rv) diff --git a/deps/openssl/openssl/crypto/x509/x509_trs.c b/deps/openssl/openssl/crypto/x509/x509_trs.c index 9e199d63e46a31..a10d437735b8df 100644 --- a/deps/openssl/openssl/crypto/x509/x509_trs.c +++ b/deps/openssl/openssl/crypto/x509/x509_trs.c @@ -1,5 +1,5 @@ /* - * Copyright 1999-2018 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -240,8 +240,9 @@ static int trust_1oid(X509_TRUST *trust, X509 *x, int flags) static int trust_compat(X509_TRUST *trust, X509 *x, int flags) { /* Call for side-effect of computing hash and caching extensions */ - X509_check_purpose(x, -1, 0); - if ((flags & X509_TRUST_NO_SS_COMPAT) == 0 && x->ex_flags & EXFLAG_SS) + if (X509_check_purpose(x, -1, 0) != 1) + return X509_TRUST_UNTRUSTED; + if ((flags & X509_TRUST_NO_SS_COMPAT) == 0 && (x->ex_flags & EXFLAG_SS)) return X509_TRUST_TRUSTED; else return X509_TRUST_UNTRUSTED; diff --git a/deps/openssl/openssl/crypto/x509/x509_vfy.c b/deps/openssl/openssl/crypto/x509/x509_vfy.c index 361954c62ee785..f28f2d2610f6db 100644 --- a/deps/openssl/openssl/crypto/x509/x509_vfy.c +++ b/deps/openssl/openssl/crypto/x509/x509_vfy.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy @@ -107,12 +107,8 @@ static int null_callback(int ok, X509_STORE_CTX *e) /* Return 1 is a certificate is self signed */ static int cert_self_signed(X509 *x) { - /* - * FIXME: x509v3_cache_extensions() needs to detect more failures and not - * set EXFLAG_SET when that happens. Especially, if the failures are - * parse errors, rather than memory pressure! - */ - X509_check_purpose(x, -1, 0); + if (X509_check_purpose(x, -1, 0) != 1) + return 0; if (x->ex_flags & EXFLAG_SS) return 1; else diff --git a/deps/openssl/openssl/crypto/x509/x_all.c b/deps/openssl/openssl/crypto/x509/x_all.c index 6cccfa99d1a62e..aa5ccba448997d 100644 --- a/deps/openssl/openssl/crypto/x509/x_all.c +++ b/deps/openssl/openssl/crypto/x509/x_all.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2017 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -362,7 +362,8 @@ int X509_pubkey_digest(const X509 *data, const EVP_MD *type, int X509_digest(const X509 *data, const EVP_MD *type, unsigned char *md, unsigned int *len) { - if (type == EVP_sha1() && (data->ex_flags & EXFLAG_SET) != 0) { + if (type == EVP_sha1() && (data->ex_flags & EXFLAG_SET) != 0 + && (data->ex_flags & EXFLAG_INVALID) == 0) { /* Asking for SHA1 and we already computed it. */ if (len != NULL) *len = sizeof(data->sha1_hash); @@ -376,7 +377,8 @@ int X509_digest(const X509 *data, const EVP_MD *type, unsigned char *md, int X509_CRL_digest(const X509_CRL *data, const EVP_MD *type, unsigned char *md, unsigned int *len) { - if (type == EVP_sha1() && (data->flags & EXFLAG_SET) != 0) { + if (type == EVP_sha1() && (data->flags & EXFLAG_SET) != 0 + && (data->flags & EXFLAG_INVALID) == 0) { /* Asking for SHA1; always computed in CRL d2i. */ if (len != NULL) *len = sizeof(data->sha1_hash); diff --git a/deps/openssl/openssl/crypto/x509/x_crl.c b/deps/openssl/openssl/crypto/x509/x_crl.c index e864126fef3726..c9762f9e2394af 100644 --- a/deps/openssl/openssl/crypto/x509/x_crl.c +++ b/deps/openssl/openssl/crypto/x509/x_crl.c @@ -1,5 +1,5 @@ /* - * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy @@ -17,7 +17,7 @@ static int X509_REVOKED_cmp(const X509_REVOKED *const *a, const X509_REVOKED *const *b); -static void setup_idp(X509_CRL *crl, ISSUING_DIST_POINT *idp); +static int setup_idp(X509_CRL *crl, ISSUING_DIST_POINT *idp); ASN1_SEQUENCE(X509_REVOKED) = { ASN1_EMBED(X509_REVOKED,serialNumber, ASN1_INTEGER), @@ -155,7 +155,7 @@ static int crl_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, X509_CRL *crl = (X509_CRL *)*pval; STACK_OF(X509_EXTENSION) *exts; X509_EXTENSION *ext; - int idx; + int idx, i; switch (operation) { case ASN1_OP_D2I_PRE: @@ -184,23 +184,35 @@ static int crl_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, break; case ASN1_OP_D2I_POST: - X509_CRL_digest(crl, EVP_sha1(), crl->sha1_hash, NULL); + if (!X509_CRL_digest(crl, EVP_sha1(), crl->sha1_hash, NULL)) + crl->flags |= EXFLAG_INVALID; crl->idp = X509_CRL_get_ext_d2i(crl, - NID_issuing_distribution_point, NULL, + NID_issuing_distribution_point, &i, NULL); - if (crl->idp) - setup_idp(crl, crl->idp); + if (crl->idp != NULL) { + if (!setup_idp(crl, crl->idp)) + crl->flags |= EXFLAG_INVALID; + } + else if (i != -1) { + crl->flags |= EXFLAG_INVALID; + } crl->akid = X509_CRL_get_ext_d2i(crl, - NID_authority_key_identifier, NULL, + NID_authority_key_identifier, &i, NULL); + if (crl->akid == NULL && i != -1) + crl->flags |= EXFLAG_INVALID; crl->crl_number = X509_CRL_get_ext_d2i(crl, - NID_crl_number, NULL, NULL); + NID_crl_number, &i, NULL); + if (crl->crl_number == NULL && i != -1) + crl->flags |= EXFLAG_INVALID; crl->base_crl_number = X509_CRL_get_ext_d2i(crl, - NID_delta_crl, NULL, + NID_delta_crl, &i, NULL); + if (crl->base_crl_number == NULL && i != -1) + crl->flags |= EXFLAG_INVALID; /* Delta CRLs must have CRL number */ if (crl->base_crl_number && !crl->crl_number) crl->flags |= EXFLAG_INVALID; @@ -259,9 +271,10 @@ static int crl_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, /* Convert IDP into a more convenient form */ -static void setup_idp(X509_CRL *crl, ISSUING_DIST_POINT *idp) +static int setup_idp(X509_CRL *crl, ISSUING_DIST_POINT *idp) { int idp_only = 0; + /* Set various flags according to IDP */ crl->idp_flags |= IDP_PRESENT; if (idp->onlyuser > 0) { @@ -292,7 +305,7 @@ static void setup_idp(X509_CRL *crl, ISSUING_DIST_POINT *idp) crl->idp_reasons &= CRLDP_ALL_REASONS; } - DIST_POINT_set_dpname(idp->distpoint, X509_CRL_get_issuer(crl)); + return DIST_POINT_set_dpname(idp->distpoint, X509_CRL_get_issuer(crl)); } ASN1_SEQUENCE_ref(X509_CRL, crl_cb) = { diff --git a/deps/openssl/openssl/crypto/x509v3/v3_purp.c b/deps/openssl/openssl/crypto/x509v3/v3_purp.c index 3f60c2ea1da336..2bc8253d2dab3a 100644 --- a/deps/openssl/openssl/crypto/x509v3/v3_purp.c +++ b/deps/openssl/openssl/crypto/x509v3/v3_purp.c @@ -1,5 +1,5 @@ /* - * Copyright 1999-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. 
You can obtain a copy @@ -81,6 +81,8 @@ int X509_check_purpose(X509 *x, int id, int ca) const X509_PURPOSE *pt; x509v3_cache_extensions(x); + if (x->ex_flags & EXFLAG_INVALID) + return -1; /* Return if side-effect only call */ if (id == -1) @@ -300,10 +302,11 @@ int X509_supported_extension(X509_EXTENSION *ex) return 0; } -static void setup_dp(X509 *x, DIST_POINT *dp) +static int setup_dp(X509 *x, DIST_POINT *dp) { X509_NAME *iname = NULL; int i; + if (dp->reasons) { if (dp->reasons->length > 0) dp->dp_reasons = dp->reasons->data[0]; @@ -313,7 +316,7 @@ static void setup_dp(X509 *x, DIST_POINT *dp) } else dp->dp_reasons = CRLDP_ALL_REASONS; if (!dp->distpoint || (dp->distpoint->type != 1)) - return; + return 1; for (i = 0; i < sk_GENERAL_NAME_num(dp->CRLissuer); i++) { GENERAL_NAME *gen = sk_GENERAL_NAME_value(dp->CRLissuer, i); if (gen->type == GEN_DIRNAME) { @@ -324,16 +327,21 @@ static void setup_dp(X509 *x, DIST_POINT *dp) if (!iname) iname = X509_get_issuer_name(x); - DIST_POINT_set_dpname(dp->distpoint, iname); - + return DIST_POINT_set_dpname(dp->distpoint, iname); } -static void setup_crldp(X509 *x) +static int setup_crldp(X509 *x) { int i; - x->crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL); - for (i = 0; i < sk_DIST_POINT_num(x->crldp); i++) - setup_dp(x, sk_DIST_POINT_value(x->crldp, i)); + + x->crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, &i, NULL); + if (x->crldp == NULL && i != -1) + return 0; + for (i = 0; i < sk_DIST_POINT_num(x->crldp); i++) { + if (!setup_dp(x, sk_DIST_POINT_value(x->crldp, i))) + return 0; + } + return 1; } #define V1_ROOT (EXFLAG_V1|EXFLAG_SS) @@ -366,12 +374,13 @@ static void x509v3_cache_extensions(X509 *x) return; } - X509_digest(x, EVP_sha1(), x->sha1_hash, NULL); + if (!X509_digest(x, EVP_sha1(), x->sha1_hash, NULL)) + x->ex_flags |= EXFLAG_INVALID; /* V1 should mean no extensions ... 
*/ if (!X509_get_version(x)) x->ex_flags |= EXFLAG_V1; /* Handle basic constraints */ - if ((bs = X509_get_ext_d2i(x, NID_basic_constraints, NULL, NULL))) { + if ((bs = X509_get_ext_d2i(x, NID_basic_constraints, &i, NULL))) { if (bs->ca) x->ex_flags |= EXFLAG_CA; if (bs->pathlen) { @@ -385,9 +394,11 @@ static void x509v3_cache_extensions(X509 *x) x->ex_pathlen = -1; BASIC_CONSTRAINTS_free(bs); x->ex_flags |= EXFLAG_BCONS; + } else if (i != -1) { + x->ex_flags |= EXFLAG_INVALID; } /* Handle proxy certificates */ - if ((pci = X509_get_ext_d2i(x, NID_proxyCertInfo, NULL, NULL))) { + if ((pci = X509_get_ext_d2i(x, NID_proxyCertInfo, &i, NULL))) { if (x->ex_flags & EXFLAG_CA || X509_get_ext_by_NID(x, NID_subject_alt_name, -1) >= 0 || X509_get_ext_by_NID(x, NID_issuer_alt_name, -1) >= 0) { @@ -399,9 +410,11 @@ static void x509v3_cache_extensions(X509 *x) x->ex_pcpathlen = -1; PROXY_CERT_INFO_EXTENSION_free(pci); x->ex_flags |= EXFLAG_PROXY; + } else if (i != -1) { + x->ex_flags |= EXFLAG_INVALID; } /* Handle key usage */ - if ((usage = X509_get_ext_d2i(x, NID_key_usage, NULL, NULL))) { + if ((usage = X509_get_ext_d2i(x, NID_key_usage, &i, NULL))) { if (usage->length > 0) { x->ex_kusage = usage->data[0]; if (usage->length > 1) @@ -410,9 +423,11 @@ static void x509v3_cache_extensions(X509 *x) x->ex_kusage = 0; x->ex_flags |= EXFLAG_KUSAGE; ASN1_BIT_STRING_free(usage); + } else if (i != -1) { + x->ex_flags |= EXFLAG_INVALID; } x->ex_xkusage = 0; - if ((extusage = X509_get_ext_d2i(x, NID_ext_key_usage, NULL, NULL))) { + if ((extusage = X509_get_ext_d2i(x, NID_ext_key_usage, &i, NULL))) { x->ex_flags |= EXFLAG_XKUSAGE; for (i = 0; i < sk_ASN1_OBJECT_num(extusage); i++) { switch (OBJ_obj2nid(sk_ASN1_OBJECT_value(extusage, i))) { @@ -455,18 +470,26 @@ static void x509v3_cache_extensions(X509 *x) } } sk_ASN1_OBJECT_pop_free(extusage, ASN1_OBJECT_free); + } else if (i != -1) { + x->ex_flags |= EXFLAG_INVALID; } - if ((ns = X509_get_ext_d2i(x, NID_netscape_cert_type, NULL, NULL))) { + if ((ns = X509_get_ext_d2i(x, NID_netscape_cert_type, &i, NULL))) { if (ns->length > 0) x->ex_nscert = ns->data[0]; else x->ex_nscert = 0; x->ex_flags |= EXFLAG_NSCERT; ASN1_BIT_STRING_free(ns); + } else if (i != -1) { + x->ex_flags |= EXFLAG_INVALID; } - x->skid = X509_get_ext_d2i(x, NID_subject_key_identifier, NULL, NULL); - x->akid = X509_get_ext_d2i(x, NID_authority_key_identifier, NULL, NULL); + x->skid = X509_get_ext_d2i(x, NID_subject_key_identifier, &i, NULL); + if (x->skid == NULL && i != -1) + x->ex_flags |= EXFLAG_INVALID; + x->akid = X509_get_ext_d2i(x, NID_authority_key_identifier, &i, NULL); + if (x->akid == NULL && i != -1) + x->ex_flags |= EXFLAG_INVALID; /* Does subject name match issuer ? 
*/ if (!X509_NAME_cmp(X509_get_subject_name(x), X509_get_issuer_name(x))) { x->ex_flags |= EXFLAG_SI; @@ -475,16 +498,22 @@ static void x509v3_cache_extensions(X509 *x) !ku_reject(x, KU_KEY_CERT_SIGN)) x->ex_flags |= EXFLAG_SS; } - x->altname = X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL); + x->altname = X509_get_ext_d2i(x, NID_subject_alt_name, &i, NULL); + if (x->altname == NULL && i != -1) + x->ex_flags |= EXFLAG_INVALID; x->nc = X509_get_ext_d2i(x, NID_name_constraints, &i, NULL); - if (!x->nc && (i != -1)) + if (x->nc == NULL && i != -1) + x->ex_flags |= EXFLAG_INVALID; + if (!setup_crldp(x)) x->ex_flags |= EXFLAG_INVALID; - setup_crldp(x); #ifndef OPENSSL_NO_RFC3779 - x->rfc3779_addr = X509_get_ext_d2i(x, NID_sbgp_ipAddrBlock, NULL, NULL); - x->rfc3779_asid = X509_get_ext_d2i(x, NID_sbgp_autonomousSysNum, - NULL, NULL); + x->rfc3779_addr = X509_get_ext_d2i(x, NID_sbgp_ipAddrBlock, &i, NULL); + if (x->rfc3779_addr == NULL && i != -1) + x->ex_flags |= EXFLAG_INVALID; + x->rfc3779_asid = X509_get_ext_d2i(x, NID_sbgp_autonomousSysNum, &i, NULL); + if (x->rfc3779_asid == NULL && i != -1) + x->ex_flags |= EXFLAG_INVALID; #endif for (i = 0; i < X509_get_ext_count(x); i++) { ex = X509_get_ext(x, i); @@ -777,7 +806,11 @@ int X509_check_issued(X509 *issuer, X509 *subject) return X509_V_ERR_SUBJECT_ISSUER_MISMATCH; x509v3_cache_extensions(issuer); + if (issuer->ex_flags & EXFLAG_INVALID) + return X509_V_ERR_UNSPECIFIED; x509v3_cache_extensions(subject); + if (subject->ex_flags & EXFLAG_INVALID) + return X509_V_ERR_UNSPECIFIED; if (subject->akid) { int ret = X509_check_akid(issuer, subject->akid); @@ -842,7 +875,8 @@ uint32_t X509_get_extension_flags(X509 *x) uint32_t X509_get_key_usage(X509 *x) { /* Call for side-effect of computing hash and caching extensions */ - X509_check_purpose(x, -1, -1); + if (X509_check_purpose(x, -1, -1) != 1) + return 0; if (x->ex_flags & EXFLAG_KUSAGE) return x->ex_kusage; return UINT32_MAX; @@ -851,7 +885,8 @@ uint32_t X509_get_key_usage(X509 *x) uint32_t X509_get_extended_key_usage(X509 *x) { /* Call for side-effect of computing hash and caching extensions */ - X509_check_purpose(x, -1, -1); + if (X509_check_purpose(x, -1, -1) != 1) + return 0; if (x->ex_flags & EXFLAG_XKUSAGE) return x->ex_xkusage; return UINT32_MAX; @@ -860,28 +895,32 @@ uint32_t X509_get_extended_key_usage(X509 *x) const ASN1_OCTET_STRING *X509_get0_subject_key_id(X509 *x) { /* Call for side-effect of computing hash and caching extensions */ - X509_check_purpose(x, -1, -1); + if (X509_check_purpose(x, -1, -1) != 1) + return NULL; return x->skid; } const ASN1_OCTET_STRING *X509_get0_authority_key_id(X509 *x) { /* Call for side-effect of computing hash and caching extensions */ - X509_check_purpose(x, -1, -1); + if (X509_check_purpose(x, -1, -1) != 1) + return NULL; return (x->akid != NULL ? x->akid->keyid : NULL); } const GENERAL_NAMES *X509_get0_authority_issuer(X509 *x) { /* Call for side-effect of computing hash and caching extensions */ - X509_check_purpose(x, -1, -1); + if (X509_check_purpose(x, -1, -1) != 1) + return NULL; return (x->akid != NULL ? x->akid->issuer : NULL); } const ASN1_INTEGER *X509_get0_authority_serial(X509 *x) { /* Call for side-effect of computing hash and caching extensions */ - X509_check_purpose(x, -1, -1); + if (X509_check_purpose(x, -1, -1) != 1) + return NULL; return (x->akid != NULL ? 
x->akid->serial : NULL); } diff --git a/deps/openssl/openssl/doc/man3/BN_generate_prime.pod b/deps/openssl/openssl/doc/man3/BN_generate_prime.pod index 31fbc1ffa1743f..f1e63f3b3c4aa9 100644 --- a/deps/openssl/openssl/doc/man3/BN_generate_prime.pod +++ b/deps/openssl/openssl/doc/man3/BN_generate_prime.pod @@ -52,7 +52,9 @@ Deprecated: BN_generate_prime_ex() generates a pseudo-random prime number of at least bit length B. The returned number is probably prime -with a negligible error. +with a negligible error. If B is B the returned prime +number will have exact bit length B with the top most two +bits set. If B is not B, it will be used to store the number. @@ -89,7 +91,9 @@ If B is not B, the prime will fulfill the condition p % B generator. If B is true, it will be a safe prime (i.e. a prime p so -that (p-1)/2 is also prime). +that (p-1)/2 is also prime). If B is true, and B == B +the condition will be p % B == 3. +It is recommended that B is a multiple of 4. The random generator must be seeded prior to calling BN_generate_prime_ex(). If the automatic seeding or reseeding of the OpenSSL CSPRNG fails due to @@ -206,7 +210,7 @@ and BN_GENCB_get_arg() functions were added in OpenSSL 1.1.0. =head1 COPYRIGHT -Copyright 2000-2019 The OpenSSL Project Authors. All Rights Reserved. +Copyright 2000-2020 The OpenSSL Project Authors. All Rights Reserved. Licensed under the OpenSSL license (the "License"). You may not use this file except in compliance with the License. You can obtain a copy diff --git a/deps/openssl/openssl/doc/man3/SSL_get_error.pod b/deps/openssl/openssl/doc/man3/SSL_get_error.pod index 97320a6c153f6f..5221ccfe18049a 100644 --- a/deps/openssl/openssl/doc/man3/SSL_get_error.pod +++ b/deps/openssl/openssl/doc/man3/SSL_get_error.pod @@ -155,6 +155,18 @@ connection and SSL_shutdown() must not be called. =back +=head1 BUGS + +The B with B value of 0 indicates unexpected EOF from +the peer. This will be properly reported as B with reason +code B in the OpenSSL 3.0 release because +it is truly a TLS protocol error to terminate the connection without +a SSL_shutdown(). + +The issue is kept unfixed in OpenSSL 1.1.1 releases because many applications +which choose to ignore this protocol error depend on the existing way of +reporting the error. + =head1 SEE ALSO L @@ -166,7 +178,7 @@ The SSL_ERROR_WANT_CLIENT_HELLO_CB error code was added in OpenSSL 1.1.1. =head1 COPYRIGHT -Copyright 2000-2019 The OpenSSL Project Authors. All Rights Reserved. +Copyright 2000-2020 The OpenSSL Project Authors. All Rights Reserved. Licensed under the OpenSSL license (the "License"). You may not use this file except in compliance with the License. You can obtain a copy diff --git a/deps/openssl/openssl/doc/man3/X509_get_extension_flags.pod b/deps/openssl/openssl/doc/man3/X509_get_extension_flags.pod index 2dfe2ef3727582..43c9c952c6b770 100644 --- a/deps/openssl/openssl/doc/man3/X509_get_extension_flags.pod +++ b/deps/openssl/openssl/doc/man3/X509_get_extension_flags.pod @@ -80,6 +80,17 @@ The certificate contains an unhandled critical extension. Some certificate extension values are invalid or inconsistent. The certificate should be rejected. +This bit may also be raised after an out-of-memory error while +processing the X509 object, so it may not be related to the processed +ASN1 object itself. + +=item B + +The NID_certificate_policies certificate extension is invalid or +inconsistent. The certificate should be rejected. 
+This bit may also be raised after an out-of-memory error while +processing the X509 object, so it may not be related to the processed +ASN1 object itself. =item B @@ -183,7 +194,7 @@ X509_get_proxy_pathlen() were added in OpenSSL 1.1.0. =head1 COPYRIGHT -Copyright 2015-2019 The OpenSSL Project Authors. All Rights Reserved. +Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved. Licensed under the OpenSSL license (the "License"). You may not use this file except in compliance with the License. You can obtain a copy diff --git a/deps/openssl/openssl/doc/man7/proxy-certificates.pod b/deps/openssl/openssl/doc/man7/proxy-certificates.pod index bc0ae11f74eec3..df5ee1b4b51887 100644 --- a/deps/openssl/openssl/doc/man7/proxy-certificates.pod +++ b/deps/openssl/openssl/doc/man7/proxy-certificates.pod @@ -217,7 +217,7 @@ The following skeleton code can be used as a starting point: * bottom. You get the CA root first, followed by the * possible chain of intermediate CAs, followed by the EE * certificate, followed by the possible proxy - * certificates. + * certificates. */ X509 *xs = X509_STORE_CTX_get_current_cert(ctx); @@ -236,7 +236,7 @@ The following skeleton code can be used as a starting point: * by pulling them from some database. If there * are none to be found, clear all rights (making * this and any subsequent proxy certificate void - * of any rights). + * of any rights). */ memset(rights->rights, 0, sizeof(rights->rights)); break; diff --git a/deps/openssl/openssl/include/openssl/opensslv.h b/deps/openssl/openssl/include/openssl/opensslv.h index dd8ef3df722f81..8c697f5f7acc85 100644 --- a/deps/openssl/openssl/include/openssl/opensslv.h +++ b/deps/openssl/openssl/include/openssl/opensslv.h @@ -1,5 +1,5 @@ /* - * Copyright 1999-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -39,8 +39,8 @@ extern "C" { * (Prior to 0.9.5a beta1, a different scheme was used: MMNNFFRBB for * major minor fix final patch/beta) */ -# define OPENSSL_VERSION_NUMBER 0x1010105fL -# define OPENSSL_VERSION_TEXT "OpenSSL 1.1.1e 17 Mar 2020" +# define OPENSSL_VERSION_NUMBER 0x1010106fL +# define OPENSSL_VERSION_TEXT "OpenSSL 1.1.1f 31 Mar 2020" /*- * The macros below are to be used for shared library (.so, .dll, ...) 
diff --git a/deps/openssl/openssl/include/openssl/sslerr.h b/deps/openssl/openssl/include/openssl/sslerr.h index 0ef684f3c13192..82983d3c1e99fd 100644 --- a/deps/openssl/openssl/include/openssl/sslerr.h +++ b/deps/openssl/openssl/include/openssl/sslerr.h @@ -734,7 +734,6 @@ int ERR_load_SSL_strings(void); # define SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES 243 # define SSL_R_UNEXPECTED_CCS_MESSAGE 262 # define SSL_R_UNEXPECTED_END_OF_EARLY_DATA 178 -# define SSL_R_UNEXPECTED_EOF_WHILE_READING 294 # define SSL_R_UNEXPECTED_MESSAGE 244 # define SSL_R_UNEXPECTED_RECORD 245 # define SSL_R_UNINITIALIZED 276 diff --git a/deps/openssl/openssl/ssl/record/rec_layer_s3.c b/deps/openssl/openssl/ssl/record/rec_layer_s3.c index 1c885a664f35e3..b2a7a47eb07529 100644 --- a/deps/openssl/openssl/ssl/record/rec_layer_s3.c +++ b/deps/openssl/openssl/ssl/record/rec_layer_s3.c @@ -296,12 +296,6 @@ int ssl3_read_n(SSL *s, size_t n, size_t max, int extend, int clearold, ret = BIO_read(s->rbio, pkt + len + left, max - left); if (ret >= 0) bioread = ret; - if (ret <= 0 - && !BIO_should_retry(s->rbio) - && BIO_eof(s->rbio)) { - SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_F_SSL3_READ_N, - SSL_R_UNEXPECTED_EOF_WHILE_READING); - } } else { SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_SSL3_READ_N, SSL_R_READ_BIO_NOT_SET); diff --git a/deps/openssl/openssl/ssl/ssl_err.c b/deps/openssl/openssl/ssl/ssl_err.c index a0c7b79659d4d7..4b12ed1485d985 100644 --- a/deps/openssl/openssl/ssl/ssl_err.c +++ b/deps/openssl/openssl/ssl/ssl_err.c @@ -1,6 +1,6 @@ /* * Generated by util/mkerr.pl DO NOT EDIT - * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the OpenSSL license (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -1205,8 +1205,6 @@ static const ERR_STRING_DATA SSL_str_reasons[] = { "unexpected ccs message"}, {ERR_PACK(ERR_LIB_SSL, 0, SSL_R_UNEXPECTED_END_OF_EARLY_DATA), "unexpected end of early data"}, - {ERR_PACK(ERR_LIB_SSL, 0, SSL_R_UNEXPECTED_EOF_WHILE_READING), - "unexpected eof while reading"}, {ERR_PACK(ERR_LIB_SSL, 0, SSL_R_UNEXPECTED_MESSAGE), "unexpected message"}, {ERR_PACK(ERR_LIB_SSL, 0, SSL_R_UNEXPECTED_RECORD), "unexpected record"}, {ERR_PACK(ERR_LIB_SSL, 0, SSL_R_UNINITIALIZED), "uninitialized"}, diff --git a/deps/openssl/openssl/ssl/ssl_lib.c b/deps/openssl/openssl/ssl/ssl_lib.c index b9adc45a8e471c..7c7e59789ccdc9 100644 --- a/deps/openssl/openssl/ssl/ssl_lib.c +++ b/deps/openssl/openssl/ssl/ssl_lib.c @@ -2630,7 +2630,7 @@ char *SSL_get_shared_ciphers(const SSL *s, char *buf, int size) * - if we are before or during/after the handshake, * - if a resumption or normal handshake is being attempted/has occurred * - whether we have negotiated TLSv1.2 (or below) or TLSv1.3 - * + * * Note that only the host_name type is defined (RFC 3546). */ const char *SSL_get_servername(const SSL *s, const int type) diff --git a/deps/openssl/openssl/test/dsa_no_digest_size_test.c b/deps/openssl/openssl/test/dsa_no_digest_size_test.c index b107635e87cc14..88c6036f53ed3c 100644 --- a/deps/openssl/openssl/test/dsa_no_digest_size_test.c +++ b/deps/openssl/openssl/test/dsa_no_digest_size_test.c @@ -242,3 +242,4 @@ int setup_tests(void) #endif return 1; } +