Update boringssl and curl (v7.50.0)

Change-Id: I89b40cf03d1aab9a13b0df449e540ab73d03451e
Kongqun Yang 2016-08-02 10:41:03 -07:00
parent 2907724416
commit 16d8583735
80 changed files with 44262 additions and 3647 deletions

DEPS

@@ -9,9 +9,7 @@
 vars = {
   "chromium_git": "https://chromium.googlesource.com",
   "googlesource_git": "https://%s.googlesource.com",
-  "curl_url": "https://github.com/bagder/curl.git",
-  # TODO(kqyang): Replace with an official release.
-  "curl_rev": "26ddc536b0ab5fc62d6503c82c34dd3dbf112dc3",
+  "curl_url": "https://github.com/curl/curl.git",
 }
 
 deps = {
@@ -33,10 +31,10 @@ deps = {
   # Make sure the version matches the one in
   # src/packager/third_party/boringssl, which contains perl generated files.
   "src/packager/third_party/boringssl/src":
-      (Var("googlesource_git") % "boringssl") + "/boringssl@209b2562235f7dab66b8260624e7b3c5b00d14a6",
+      (Var("googlesource_git") % "boringssl") + "/boringssl@3cab5572b1fcf5a8f6018529dc30dc8d21b2a4bd",
   "src/packager/third_party/curl/source":
-      Var("curl_url") + "@" + Var("curl_rev"),
+      Var("curl_url") + "@curl-7_50_0",
   "src/packager/third_party/gflags":
       Var("chromium_git") + "/external/webrtc/trunk/third_party/gflags@cc7e9a4b374ff7b6a1cae4d76161113ea985b624",


@@ -19,7 +19,7 @@ FILE* g_rand_source_fp = NULL;
 const char kFakePrngDataFile[] = "fake_prng_data.bin";
 
 // RAND_bytes and RAND_pseudorand implementation.
-int FakeBytes(uint8_t* buf, int num) {
+int FakeBytes(uint8_t* buf, size_t num) {
   DCHECK(buf);
   DCHECK(g_rand_source_fp);


@@ -0,0 +1,459 @@
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by generate_build_files.py. Do not edit manually.
crypto_sources = [
"err_data.c",
"src/crypto/aes/aes.c",
"src/crypto/aes/mode_wrappers.c",
"src/crypto/asn1/a_bitstr.c",
"src/crypto/asn1/a_bool.c",
"src/crypto/asn1/a_bytes.c",
"src/crypto/asn1/a_d2i_fp.c",
"src/crypto/asn1/a_dup.c",
"src/crypto/asn1/a_enum.c",
"src/crypto/asn1/a_gentm.c",
"src/crypto/asn1/a_i2d_fp.c",
"src/crypto/asn1/a_int.c",
"src/crypto/asn1/a_mbstr.c",
"src/crypto/asn1/a_object.c",
"src/crypto/asn1/a_octet.c",
"src/crypto/asn1/a_print.c",
"src/crypto/asn1/a_strnid.c",
"src/crypto/asn1/a_time.c",
"src/crypto/asn1/a_type.c",
"src/crypto/asn1/a_utctm.c",
"src/crypto/asn1/a_utf8.c",
"src/crypto/asn1/asn1_lib.c",
"src/crypto/asn1/asn1_par.c",
"src/crypto/asn1/asn_pack.c",
"src/crypto/asn1/f_enum.c",
"src/crypto/asn1/f_int.c",
"src/crypto/asn1/f_string.c",
"src/crypto/asn1/t_bitst.c",
"src/crypto/asn1/tasn_dec.c",
"src/crypto/asn1/tasn_enc.c",
"src/crypto/asn1/tasn_fre.c",
"src/crypto/asn1/tasn_new.c",
"src/crypto/asn1/tasn_typ.c",
"src/crypto/asn1/tasn_utl.c",
"src/crypto/asn1/x_bignum.c",
"src/crypto/asn1/x_long.c",
"src/crypto/base64/base64.c",
"src/crypto/bio/bio.c",
"src/crypto/bio/bio_mem.c",
"src/crypto/bio/buffer.c",
"src/crypto/bio/connect.c",
"src/crypto/bio/fd.c",
"src/crypto/bio/file.c",
"src/crypto/bio/hexdump.c",
"src/crypto/bio/pair.c",
"src/crypto/bio/printf.c",
"src/crypto/bio/socket.c",
"src/crypto/bio/socket_helper.c",
"src/crypto/bn/add.c",
"src/crypto/bn/asm/x86_64-gcc.c",
"src/crypto/bn/bn.c",
"src/crypto/bn/bn_asn1.c",
"src/crypto/bn/cmp.c",
"src/crypto/bn/convert.c",
"src/crypto/bn/ctx.c",
"src/crypto/bn/div.c",
"src/crypto/bn/exponentiation.c",
"src/crypto/bn/gcd.c",
"src/crypto/bn/generic.c",
"src/crypto/bn/kronecker.c",
"src/crypto/bn/montgomery.c",
"src/crypto/bn/mul.c",
"src/crypto/bn/prime.c",
"src/crypto/bn/random.c",
"src/crypto/bn/rsaz_exp.c",
"src/crypto/bn/shift.c",
"src/crypto/bn/sqrt.c",
"src/crypto/buf/buf.c",
"src/crypto/bytestring/asn1_compat.c",
"src/crypto/bytestring/ber.c",
"src/crypto/bytestring/cbb.c",
"src/crypto/bytestring/cbs.c",
"src/crypto/chacha/chacha.c",
"src/crypto/cipher/aead.c",
"src/crypto/cipher/cipher.c",
"src/crypto/cipher/derive_key.c",
"src/crypto/cipher/e_aes.c",
"src/crypto/cipher/e_chacha20poly1305.c",
"src/crypto/cipher/e_des.c",
"src/crypto/cipher/e_null.c",
"src/crypto/cipher/e_rc2.c",
"src/crypto/cipher/e_rc4.c",
"src/crypto/cipher/e_ssl3.c",
"src/crypto/cipher/e_tls.c",
"src/crypto/cipher/tls_cbc.c",
"src/crypto/cmac/cmac.c",
"src/crypto/conf/conf.c",
"src/crypto/cpu-aarch64-linux.c",
"src/crypto/cpu-arm-linux.c",
"src/crypto/cpu-arm.c",
"src/crypto/cpu-intel.c",
"src/crypto/crypto.c",
"src/crypto/curve25519/curve25519.c",
"src/crypto/curve25519/spake25519.c",
"src/crypto/curve25519/x25519-x86_64.c",
"src/crypto/des/des.c",
"src/crypto/dh/check.c",
"src/crypto/dh/dh.c",
"src/crypto/dh/dh_asn1.c",
"src/crypto/dh/params.c",
"src/crypto/digest/digest.c",
"src/crypto/digest/digests.c",
"src/crypto/dsa/dsa.c",
"src/crypto/dsa/dsa_asn1.c",
"src/crypto/ec/ec.c",
"src/crypto/ec/ec_asn1.c",
"src/crypto/ec/ec_key.c",
"src/crypto/ec/ec_montgomery.c",
"src/crypto/ec/oct.c",
"src/crypto/ec/p224-64.c",
"src/crypto/ec/p256-64.c",
"src/crypto/ec/p256-x86_64.c",
"src/crypto/ec/simple.c",
"src/crypto/ec/util-64.c",
"src/crypto/ec/wnaf.c",
"src/crypto/ecdh/ecdh.c",
"src/crypto/ecdsa/ecdsa.c",
"src/crypto/ecdsa/ecdsa_asn1.c",
"src/crypto/engine/engine.c",
"src/crypto/err/err.c",
"src/crypto/evp/digestsign.c",
"src/crypto/evp/evp.c",
"src/crypto/evp/evp_asn1.c",
"src/crypto/evp/evp_ctx.c",
"src/crypto/evp/p_dsa_asn1.c",
"src/crypto/evp/p_ec.c",
"src/crypto/evp/p_ec_asn1.c",
"src/crypto/evp/p_rsa.c",
"src/crypto/evp/p_rsa_asn1.c",
"src/crypto/evp/pbkdf.c",
"src/crypto/evp/print.c",
"src/crypto/evp/sign.c",
"src/crypto/ex_data.c",
"src/crypto/hkdf/hkdf.c",
"src/crypto/hmac/hmac.c",
"src/crypto/lhash/lhash.c",
"src/crypto/md4/md4.c",
"src/crypto/md5/md5.c",
"src/crypto/mem.c",
"src/crypto/modes/cbc.c",
"src/crypto/modes/cfb.c",
"src/crypto/modes/ctr.c",
"src/crypto/modes/gcm.c",
"src/crypto/modes/ofb.c",
"src/crypto/newhope/error_correction.c",
"src/crypto/newhope/newhope.c",
"src/crypto/newhope/ntt.c",
"src/crypto/newhope/poly.c",
"src/crypto/newhope/precomp.c",
"src/crypto/newhope/reduce.c",
"src/crypto/obj/obj.c",
"src/crypto/obj/obj_xref.c",
"src/crypto/pem/pem_all.c",
"src/crypto/pem/pem_info.c",
"src/crypto/pem/pem_lib.c",
"src/crypto/pem/pem_oth.c",
"src/crypto/pem/pem_pk8.c",
"src/crypto/pem/pem_pkey.c",
"src/crypto/pem/pem_x509.c",
"src/crypto/pem/pem_xaux.c",
"src/crypto/pkcs8/p5_pbe.c",
"src/crypto/pkcs8/p5_pbev2.c",
"src/crypto/pkcs8/p8_pkey.c",
"src/crypto/pkcs8/pkcs8.c",
"src/crypto/poly1305/poly1305.c",
"src/crypto/poly1305/poly1305_arm.c",
"src/crypto/poly1305/poly1305_vec.c",
"src/crypto/rand/deterministic.c",
"src/crypto/rand/rand.c",
"src/crypto/rand/urandom.c",
"src/crypto/rand/windows.c",
"src/crypto/rc4/rc4.c",
"src/crypto/refcount_c11.c",
"src/crypto/refcount_lock.c",
"src/crypto/rsa/blinding.c",
"src/crypto/rsa/padding.c",
"src/crypto/rsa/rsa.c",
"src/crypto/rsa/rsa_asn1.c",
"src/crypto/rsa/rsa_impl.c",
"src/crypto/sha/sha1.c",
"src/crypto/sha/sha256.c",
"src/crypto/sha/sha512.c",
"src/crypto/stack/stack.c",
"src/crypto/thread.c",
"src/crypto/thread_none.c",
"src/crypto/thread_pthread.c",
"src/crypto/thread_win.c",
"src/crypto/time_support.c",
"src/crypto/x509/a_digest.c",
"src/crypto/x509/a_sign.c",
"src/crypto/x509/a_strex.c",
"src/crypto/x509/a_verify.c",
"src/crypto/x509/algorithm.c",
"src/crypto/x509/asn1_gen.c",
"src/crypto/x509/by_dir.c",
"src/crypto/x509/by_file.c",
"src/crypto/x509/i2d_pr.c",
"src/crypto/x509/pkcs7.c",
"src/crypto/x509/rsa_pss.c",
"src/crypto/x509/t_crl.c",
"src/crypto/x509/t_req.c",
"src/crypto/x509/t_x509.c",
"src/crypto/x509/t_x509a.c",
"src/crypto/x509/x509.c",
"src/crypto/x509/x509_att.c",
"src/crypto/x509/x509_cmp.c",
"src/crypto/x509/x509_d2.c",
"src/crypto/x509/x509_def.c",
"src/crypto/x509/x509_ext.c",
"src/crypto/x509/x509_lu.c",
"src/crypto/x509/x509_obj.c",
"src/crypto/x509/x509_r2x.c",
"src/crypto/x509/x509_req.c",
"src/crypto/x509/x509_set.c",
"src/crypto/x509/x509_trs.c",
"src/crypto/x509/x509_txt.c",
"src/crypto/x509/x509_v3.c",
"src/crypto/x509/x509_vfy.c",
"src/crypto/x509/x509_vpm.c",
"src/crypto/x509/x509cset.c",
"src/crypto/x509/x509name.c",
"src/crypto/x509/x509rset.c",
"src/crypto/x509/x509spki.c",
"src/crypto/x509/x509type.c",
"src/crypto/x509/x_algor.c",
"src/crypto/x509/x_all.c",
"src/crypto/x509/x_attrib.c",
"src/crypto/x509/x_crl.c",
"src/crypto/x509/x_exten.c",
"src/crypto/x509/x_info.c",
"src/crypto/x509/x_name.c",
"src/crypto/x509/x_pkey.c",
"src/crypto/x509/x_pubkey.c",
"src/crypto/x509/x_req.c",
"src/crypto/x509/x_sig.c",
"src/crypto/x509/x_spki.c",
"src/crypto/x509/x_val.c",
"src/crypto/x509/x_x509.c",
"src/crypto/x509/x_x509a.c",
"src/crypto/x509v3/pcy_cache.c",
"src/crypto/x509v3/pcy_data.c",
"src/crypto/x509v3/pcy_lib.c",
"src/crypto/x509v3/pcy_map.c",
"src/crypto/x509v3/pcy_node.c",
"src/crypto/x509v3/pcy_tree.c",
"src/crypto/x509v3/v3_akey.c",
"src/crypto/x509v3/v3_akeya.c",
"src/crypto/x509v3/v3_alt.c",
"src/crypto/x509v3/v3_bcons.c",
"src/crypto/x509v3/v3_bitst.c",
"src/crypto/x509v3/v3_conf.c",
"src/crypto/x509v3/v3_cpols.c",
"src/crypto/x509v3/v3_crld.c",
"src/crypto/x509v3/v3_enum.c",
"src/crypto/x509v3/v3_extku.c",
"src/crypto/x509v3/v3_genn.c",
"src/crypto/x509v3/v3_ia5.c",
"src/crypto/x509v3/v3_info.c",
"src/crypto/x509v3/v3_int.c",
"src/crypto/x509v3/v3_lib.c",
"src/crypto/x509v3/v3_ncons.c",
"src/crypto/x509v3/v3_pci.c",
"src/crypto/x509v3/v3_pcia.c",
"src/crypto/x509v3/v3_pcons.c",
"src/crypto/x509v3/v3_pku.c",
"src/crypto/x509v3/v3_pmaps.c",
"src/crypto/x509v3/v3_prn.c",
"src/crypto/x509v3/v3_purp.c",
"src/crypto/x509v3/v3_skey.c",
"src/crypto/x509v3/v3_sxnet.c",
"src/crypto/x509v3/v3_utl.c",
]
ssl_sources = [
"src/ssl/custom_extensions.c",
"src/ssl/d1_both.c",
"src/ssl/d1_lib.c",
"src/ssl/d1_meth.c",
"src/ssl/d1_pkt.c",
"src/ssl/d1_srtp.c",
"src/ssl/dtls_record.c",
"src/ssl/handshake_client.c",
"src/ssl/handshake_server.c",
"src/ssl/pqueue/pqueue.c",
"src/ssl/s3_both.c",
"src/ssl/s3_enc.c",
"src/ssl/s3_lib.c",
"src/ssl/s3_meth.c",
"src/ssl/s3_pkt.c",
"src/ssl/ssl_aead_ctx.c",
"src/ssl/ssl_asn1.c",
"src/ssl/ssl_buffer.c",
"src/ssl/ssl_cert.c",
"src/ssl/ssl_cipher.c",
"src/ssl/ssl_ecdh.c",
"src/ssl/ssl_file.c",
"src/ssl/ssl_lib.c",
"src/ssl/ssl_rsa.c",
"src/ssl/ssl_session.c",
"src/ssl/ssl_stat.c",
"src/ssl/t1_enc.c",
"src/ssl/t1_lib.c",
"src/ssl/tls_record.c",
]
crypto_sources_linux_aarch64 = [
"linux-aarch64/crypto/aes/aesv8-armx64.S",
"linux-aarch64/crypto/bn/armv8-mont.S",
"linux-aarch64/crypto/chacha/chacha-armv8.S",
"linux-aarch64/crypto/modes/ghashv8-armx64.S",
"linux-aarch64/crypto/sha/sha1-armv8.S",
"linux-aarch64/crypto/sha/sha256-armv8.S",
"linux-aarch64/crypto/sha/sha512-armv8.S",
]
crypto_sources_linux_arm = [
"linux-arm/crypto/aes/aes-armv4.S",
"linux-arm/crypto/aes/aesv8-armx32.S",
"linux-arm/crypto/aes/bsaes-armv7.S",
"linux-arm/crypto/bn/armv4-mont.S",
"linux-arm/crypto/chacha/chacha-armv4.S",
"linux-arm/crypto/modes/ghash-armv4.S",
"linux-arm/crypto/modes/ghashv8-armx32.S",
"linux-arm/crypto/sha/sha1-armv4-large.S",
"linux-arm/crypto/sha/sha256-armv4.S",
"linux-arm/crypto/sha/sha512-armv4.S",
"src/crypto/curve25519/asm/x25519-asm-arm.S",
"src/crypto/poly1305/poly1305_arm_asm.S",
]
crypto_sources_linux_x86 = [
"linux-x86/crypto/aes/aes-586.S",
"linux-x86/crypto/aes/aesni-x86.S",
"linux-x86/crypto/aes/vpaes-x86.S",
"linux-x86/crypto/bn/bn-586.S",
"linux-x86/crypto/bn/co-586.S",
"linux-x86/crypto/bn/x86-mont.S",
"linux-x86/crypto/chacha/chacha-x86.S",
"linux-x86/crypto/md5/md5-586.S",
"linux-x86/crypto/modes/ghash-x86.S",
"linux-x86/crypto/rc4/rc4-586.S",
"linux-x86/crypto/sha/sha1-586.S",
"linux-x86/crypto/sha/sha256-586.S",
"linux-x86/crypto/sha/sha512-586.S",
]
crypto_sources_linux_x86_64 = [
"linux-x86_64/crypto/aes/aes-x86_64.S",
"linux-x86_64/crypto/aes/aesni-x86_64.S",
"linux-x86_64/crypto/aes/bsaes-x86_64.S",
"linux-x86_64/crypto/aes/vpaes-x86_64.S",
"linux-x86_64/crypto/bn/rsaz-avx2.S",
"linux-x86_64/crypto/bn/rsaz-x86_64.S",
"linux-x86_64/crypto/bn/x86_64-mont.S",
"linux-x86_64/crypto/bn/x86_64-mont5.S",
"linux-x86_64/crypto/chacha/chacha-x86_64.S",
"linux-x86_64/crypto/ec/p256-x86_64-asm.S",
"linux-x86_64/crypto/md5/md5-x86_64.S",
"linux-x86_64/crypto/modes/aesni-gcm-x86_64.S",
"linux-x86_64/crypto/modes/ghash-x86_64.S",
"linux-x86_64/crypto/rand/rdrand-x86_64.S",
"linux-x86_64/crypto/rc4/rc4-x86_64.S",
"linux-x86_64/crypto/sha/sha1-x86_64.S",
"linux-x86_64/crypto/sha/sha256-x86_64.S",
"linux-x86_64/crypto/sha/sha512-x86_64.S",
"src/crypto/curve25519/asm/x25519-asm-x86_64.S",
]
crypto_sources_mac_x86 = [
"mac-x86/crypto/aes/aes-586.S",
"mac-x86/crypto/aes/aesni-x86.S",
"mac-x86/crypto/aes/vpaes-x86.S",
"mac-x86/crypto/bn/bn-586.S",
"mac-x86/crypto/bn/co-586.S",
"mac-x86/crypto/bn/x86-mont.S",
"mac-x86/crypto/chacha/chacha-x86.S",
"mac-x86/crypto/md5/md5-586.S",
"mac-x86/crypto/modes/ghash-x86.S",
"mac-x86/crypto/rc4/rc4-586.S",
"mac-x86/crypto/sha/sha1-586.S",
"mac-x86/crypto/sha/sha256-586.S",
"mac-x86/crypto/sha/sha512-586.S",
]
crypto_sources_mac_x86_64 = [
"mac-x86_64/crypto/aes/aes-x86_64.S",
"mac-x86_64/crypto/aes/aesni-x86_64.S",
"mac-x86_64/crypto/aes/bsaes-x86_64.S",
"mac-x86_64/crypto/aes/vpaes-x86_64.S",
"mac-x86_64/crypto/bn/rsaz-avx2.S",
"mac-x86_64/crypto/bn/rsaz-x86_64.S",
"mac-x86_64/crypto/bn/x86_64-mont.S",
"mac-x86_64/crypto/bn/x86_64-mont5.S",
"mac-x86_64/crypto/chacha/chacha-x86_64.S",
"mac-x86_64/crypto/ec/p256-x86_64-asm.S",
"mac-x86_64/crypto/md5/md5-x86_64.S",
"mac-x86_64/crypto/modes/aesni-gcm-x86_64.S",
"mac-x86_64/crypto/modes/ghash-x86_64.S",
"mac-x86_64/crypto/rand/rdrand-x86_64.S",
"mac-x86_64/crypto/rc4/rc4-x86_64.S",
"mac-x86_64/crypto/sha/sha1-x86_64.S",
"mac-x86_64/crypto/sha/sha256-x86_64.S",
"mac-x86_64/crypto/sha/sha512-x86_64.S",
"src/crypto/curve25519/asm/x25519-asm-x86_64.S",
]
crypto_sources_win_x86 = [
"win-x86/crypto/aes/aes-586.asm",
"win-x86/crypto/aes/aesni-x86.asm",
"win-x86/crypto/aes/vpaes-x86.asm",
"win-x86/crypto/bn/bn-586.asm",
"win-x86/crypto/bn/co-586.asm",
"win-x86/crypto/bn/x86-mont.asm",
"win-x86/crypto/chacha/chacha-x86.asm",
"win-x86/crypto/md5/md5-586.asm",
"win-x86/crypto/modes/ghash-x86.asm",
"win-x86/crypto/rc4/rc4-586.asm",
"win-x86/crypto/sha/sha1-586.asm",
"win-x86/crypto/sha/sha256-586.asm",
"win-x86/crypto/sha/sha512-586.asm",
]
crypto_sources_win_x86_64 = [
"win-x86_64/crypto/aes/aes-x86_64.asm",
"win-x86_64/crypto/aes/aesni-x86_64.asm",
"win-x86_64/crypto/aes/bsaes-x86_64.asm",
"win-x86_64/crypto/aes/vpaes-x86_64.asm",
"win-x86_64/crypto/bn/rsaz-avx2.asm",
"win-x86_64/crypto/bn/rsaz-x86_64.asm",
"win-x86_64/crypto/bn/x86_64-mont.asm",
"win-x86_64/crypto/bn/x86_64-mont5.asm",
"win-x86_64/crypto/chacha/chacha-x86_64.asm",
"win-x86_64/crypto/ec/p256-x86_64-asm.asm",
"win-x86_64/crypto/md5/md5-x86_64.asm",
"win-x86_64/crypto/modes/aesni-gcm-x86_64.asm",
"win-x86_64/crypto/modes/ghash-x86_64.asm",
"win-x86_64/crypto/rand/rdrand-x86_64.asm",
"win-x86_64/crypto/rc4/rc4-x86_64.asm",
"win-x86_64/crypto/sha/sha1-x86_64.asm",
"win-x86_64/crypto/sha/sha256-x86_64.asm",
"win-x86_64/crypto/sha/sha512-x86_64.asm",
]
fuzzers = [
"cert",
"client",
"pkcs8",
"privkey",
"read_pem",
"server",
"spki",
]
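
Because the lists above are generated, they should not be edited by hand. As a rough illustration only, a simplified generator along these lines could emit such a source list; the real logic lives in BoringSSL's src/util/generate_build_files.py and additionally filters out tests, per-platform assembly, and other special cases:

# Hypothetical, simplified sketch of emitting a source list like the one
# above. The real generator is BoringSSL's src/util/generate_build_files.py.
import os


def list_sources(root, subdir, exts=(".c",)):
    """Collect repository-relative paths of matching files under root/subdir."""
    out = []
    for dirpath, _, files in os.walk(os.path.join(root, subdir)):
        for name in files:
            if name.endswith(exts):
                path = os.path.relpath(os.path.join(dirpath, name), root)
                out.append(path.replace(os.sep, "/"))
    return sorted(out)


if __name__ == "__main__":
    crypto_sources = ["err_data.c"] + list_sources("src", "crypto")
    print("crypto_sources = [")
    for source in crypto_sources:
        print('  "%s",' % source)
    print("]")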


@@ -0,0 +1,597 @@
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by generate_build_files.py. Do not edit manually.
_test_support_sources = [
"src/crypto/test/file_test.cc",
"src/crypto/test/file_test.h",
"src/crypto/test/malloc.cc",
"src/crypto/test/scoped_types.h",
"src/crypto/test/test_util.cc",
"src/crypto/test/test_util.h",
"src/ssl/test/async_bio.h",
"src/ssl/test/packeted_bio.h",
"src/ssl/test/scoped_types.h",
"src/ssl/test/test_config.h",
]
template("create_tests") {
executable("boringssl_aes_test") {
sources = [
"src/crypto/aes/aes_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_asn1_test") {
sources = [
"src/crypto/asn1/asn1_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_base64_test") {
sources = [
"src/crypto/base64/base64_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_bio_test") {
sources = [
"src/crypto/bio/bio_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_bn_test") {
sources = [
"src/crypto/bn/bn_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_bytestring_test") {
sources = [
"src/crypto/bytestring/bytestring_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_chacha_test") {
sources = [
"src/crypto/chacha/chacha_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_aead_test") {
sources = [
"src/crypto/cipher/aead_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_cipher_test") {
sources = [
"src/crypto/cipher/cipher_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_cmac_test") {
sources = [
"src/crypto/cmac/cmac_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_constant_time_test") {
sources = [
"src/crypto/constant_time_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_ed25519_test") {
sources = [
"src/crypto/curve25519/ed25519_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_spake25519_test") {
sources = [
"src/crypto/curve25519/spake25519_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_x25519_test") {
sources = [
"src/crypto/curve25519/x25519_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_dh_test") {
sources = [
"src/crypto/dh/dh_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_digest_test") {
sources = [
"src/crypto/digest/digest_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_dsa_test") {
sources = [
"src/crypto/dsa/dsa_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_ec_test") {
sources = [
"src/crypto/ec/ec_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_example_mul") {
sources = [
"src/crypto/ec/example_mul.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_ecdsa_test") {
sources = [
"src/crypto/ecdsa/ecdsa_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_err_test") {
sources = [
"src/crypto/err/err_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_evp_extra_test") {
sources = [
"src/crypto/evp/evp_extra_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_evp_test") {
sources = [
"src/crypto/evp/evp_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_pbkdf_test") {
sources = [
"src/crypto/evp/pbkdf_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_hkdf_test") {
sources = [
"src/crypto/hkdf/hkdf_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_hmac_test") {
sources = [
"src/crypto/hmac/hmac_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_lhash_test") {
sources = [
"src/crypto/lhash/lhash_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_gcm_test") {
sources = [
"src/crypto/modes/gcm_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_newhope_statistical_test") {
sources = [
"src/crypto/newhope/newhope_statistical_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_newhope_test") {
sources = [
"src/crypto/newhope/newhope_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_newhope_vectors_test") {
sources = [
"src/crypto/newhope/newhope_vectors_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_obj_test") {
sources = [
"src/crypto/obj/obj_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_pkcs12_test") {
sources = [
"src/crypto/pkcs8/pkcs12_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_pkcs8_test") {
sources = [
"src/crypto/pkcs8/pkcs8_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_poly1305_test") {
sources = [
"src/crypto/poly1305/poly1305_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_refcount_test") {
sources = [
"src/crypto/refcount_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_rsa_test") {
sources = [
"src/crypto/rsa/rsa_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_thread_test") {
sources = [
"src/crypto/thread_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_pkcs7_test") {
sources = [
"src/crypto/x509/pkcs7_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_x509_test") {
sources = [
"src/crypto/x509/x509_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_tab_test") {
sources = [
"src/crypto/x509v3/tab_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_v3name_test") {
sources = [
"src/crypto/x509v3/v3name_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_pqueue_test") {
sources = [
"src/ssl/pqueue/pqueue_test.c",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
executable("boringssl_ssl_test") {
sources = [
"src/ssl/ssl_test.cc",
]
sources += _test_support_sources
if (defined(invoker.configs_exclude)) {
configs -= invoker.configs_exclude
}
configs += invoker.configs
deps = invoker.deps
}
group(target_name) {
deps = [
":boringssl_aead_test",
":boringssl_aes_test",
":boringssl_asn1_test",
":boringssl_base64_test",
":boringssl_bio_test",
":boringssl_bn_test",
":boringssl_bytestring_test",
":boringssl_chacha_test",
":boringssl_cipher_test",
":boringssl_cmac_test",
":boringssl_constant_time_test",
":boringssl_dh_test",
":boringssl_digest_test",
":boringssl_dsa_test",
":boringssl_ec_test",
":boringssl_ecdsa_test",
":boringssl_ed25519_test",
":boringssl_err_test",
":boringssl_evp_extra_test",
":boringssl_evp_test",
":boringssl_example_mul",
":boringssl_gcm_test",
":boringssl_hkdf_test",
":boringssl_hmac_test",
":boringssl_lhash_test",
":boringssl_newhope_statistical_test",
":boringssl_newhope_test",
":boringssl_newhope_vectors_test",
":boringssl_obj_test",
":boringssl_pbkdf_test",
":boringssl_pkcs12_test",
":boringssl_pkcs7_test",
":boringssl_pkcs8_test",
":boringssl_poly1305_test",
":boringssl_pqueue_test",
":boringssl_refcount_test",
":boringssl_rsa_test",
":boringssl_spake25519_test",
":boringssl_ssl_test",
":boringssl_tab_test",
":boringssl_thread_test",
":boringssl_v3name_test",
":boringssl_x25519_test",
":boringssl_x509_test",
]
}
}


@ -2,26 +2,51 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
import("//build/config/android/config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/build.gni")
import("//testing/libfuzzer/fuzzer_test.gni")
import("BUILD.generated.gni")
import("BUILD.generated_tests.gni")
# Config for us and everybody else depending on BoringSSL. # Config for us and everybody else depending on BoringSSL.
config("openssl_config") { config("external_config") {
include_dirs = [] include_dirs = [ "src/include" ]
include_dirs += [ "src/include" ]
if (is_component_build) { if (is_component_build) {
defines = [ "BORINGSSL_SHARED_LIBRARY" ] defines = [ "BORINGSSL_SHARED_LIBRARY" ]
} }
} }
# Config internal to this build file. # Config internal to this build file, shared by boringssl and boringssl_fuzzer.
config("openssl_internal_config") { config("internal_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = [
"BORINGSSL_IMPLEMENTATION",
"BORINGSSL_NO_STATIC_INITIALIZER",
"OPENSSL_SMALL",
]
configs = [
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
"//build/config/compiler:no_size_t_to_int_warning",
]
if (is_posix) {
cflags_c = [ "-std=c99" ]
defines += [ "_XOPEN_SOURCE=700" ]
}
} }
# The list of BoringSSL files is kept in boringssl.gypi. config("no_asm_config") {
gypi_values = visibility = [ ":*" ] # Only targets in this file can depend on this.
exec_script("//build/gypi_to_gn.py", defines = [ "OPENSSL_NO_ASM" ]
[ rebase_path("//third_party/boringssl/boringssl.gypi") ], }
"scope",
[ "//third_party/boringssl/boringssl.gypi" ]) config("fuzzer_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = [ "BORINGSSL_UNSAFE_FUZZER_MODE" ]
}
all_sources = crypto_sources + ssl_sources
# Windows' assembly is built with Yasm. The other platforms use the platform # Windows' assembly is built with Yasm. The other platforms use the platform
# assembler. # assembler.
@ -29,75 +54,159 @@ if (is_win && !is_msan) {
import("//third_party/yasm/yasm_assemble.gni") import("//third_party/yasm/yasm_assemble.gni")
yasm_assemble("boringssl_asm") { yasm_assemble("boringssl_asm") {
if (current_cpu == "x64") { if (current_cpu == "x64") {
sources = gypi_values.boringssl_win_x86_64_sources sources = crypto_sources_win_x86_64
} else if (current_cpu == "x86") { } else if (current_cpu == "x86") {
sources = gypi_values.boringssl_win_x86_sources sources = crypto_sources_win_x86
}
}
} else {
source_set("boringssl_asm") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = []
sources = []
asmflags = []
include_dirs = [ "src/include" ]
if ((current_cpu == "arm" || current_cpu == "arm64") && is_clang) {
if (current_cpu == "arm") {
# TODO(hans) Enable integrated-as (crbug.com/124610).
asmflags += [ "-fno-integrated-as" ]
}
if (is_android) {
rebased_android_toolchain_root =
rebase_path(android_toolchain_root, root_build_dir)
# Else /usr/bin/as gets picked up.
asmflags += [ "-B${rebased_android_toolchain_root}/bin" ]
}
}
if (is_msan) {
public_configs = [ ":no_asm_config" ]
} else if (current_cpu == "x64") {
if (is_mac) {
sources += crypto_sources_mac_x86_64
} else if (is_linux || is_android) {
sources += crypto_sources_linux_x86_64
} else {
public_configs = [ ":no_asm_config" ]
}
} else if (current_cpu == "x86") {
if (is_mac) {
sources += crypto_sources_mac_x86
} else if (is_linux || is_android) {
sources += crypto_sources_linux_x86
} else {
public_configs = [ ":no_asm_config" ]
}
} else if (current_cpu == "arm" && (is_linux || is_android)) {
sources += crypto_sources_linux_arm
} else if (current_cpu == "arm64" && (is_linux || is_android)) {
sources += crypto_sources_linux_aarch64
# TODO(davidben): Remove explicit arch flag once https://crbug.com/576858
# is fixed.
asmflags += [ "-march=armv8-a+crypto" ]
} else {
public_configs = [ ":no_asm_config" ]
} }
} }
} }
component("boringssl") { component("boringssl") {
sources = gypi_values.boringssl_crypto_sources sources = all_sources
sources += gypi_values.boringssl_ssl_sources deps = [
":boringssl_asm",
public_configs = [ ":openssl_config" ]
cflags = []
defines = [
"BORINGSSL_IMPLEMENTATION",
"BORINGSSL_NO_STATIC_INITIALIZER",
] ]
deps = []
if (is_component_build) { public_configs = [ ":external_config" ]
defines += [ "BORINGSSL_SHARED_LIBRARY" ] configs += [ ":internal_config" ]
}
configs -= [ "//build/config/compiler:chromium_code" ] configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ configs += [ "//build/config/compiler:no_chromium_code" ]
"//build/config/compiler:no_chromium_code",
# TODO(davidben): Fix size_t truncations in BoringSSL. if (is_nacl) {
# https://crbug.com/429039 deps += [ "//native_client_sdk/src/libraries/nacl_io" ]
"//build/config/compiler:no_size_t_to_int_warning", }
] }
# Also gets the include dirs from :openssl_config if (build_with_chromium) {
include_dirs = [ create_tests("boringssl_tests") {
"src/include", configs_exclude = [ "//build/config/compiler:chromium_code" ]
configs = [
# This is for arm_arch.h, which is needed by some asm files. Since the ":internal_config",
# asm files are generated and kept in a different directory, they "//build/config/compiler:no_chromium_code",
# cannot use relative paths to find this file. ]
"src/crypto", deps = [
] ":boringssl",
"//build/win:default_exe_manifest",
if (is_msan) { ]
defines += [ "OPENSSL_NO_ASM" ] }
} else if (current_cpu == "x64") {
if (is_mac || is_ios) { if (!is_ios) {
sources += gypi_values.boringssl_mac_x86_64_sources test("boringssl_unittests") {
} else if (is_linux || is_android) { deps = [
sources += gypi_values.boringssl_linux_x86_64_sources ":boringssl_tests",
} else if (is_win) { "//base",
deps += [ ":boringssl_asm" ] "//base/test:run_all_unittests",
} else { "//base/test:test_support",
defines += [ "OPENSSL_NO_ASM" ] "//testing/gtest",
} ]
} else if (current_cpu == "x86") { sources = [
if (is_mac || is_ios) { "boringssl_unittest.cc",
sources += gypi_values.boringssl_mac_x86_sources ]
} else if (is_linux || is_android) { }
sources += gypi_values.boringssl_linux_x86_sources }
} else if (is_win) {
deps += [ ":boringssl_asm" ] # The same as boringssl, but builds with BORINGSSL_UNSAFE_FUZZER_MODE.
} else { component("boringssl_fuzzer") {
defines += [ "OPENSSL_NO_ASM" ] visibility = [ ":*" ] # Only targets in this file can depend on this.
}
} else if (current_cpu == "arm" && (is_linux || is_android)) { sources = all_sources
sources += gypi_values.boringssl_linux_arm_sources deps = [
} else if (current_cpu == "arm64" && (is_linux || is_android)) { ":boringssl_asm",
sources += gypi_values.boringssl_linux_aarch64_sources ]
} else {
defines += [ "OPENSSL_NO_ASM" ] public_configs = [
":external_config",
":fuzzer_config",
]
configs += [ ":internal_config" ]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
if (is_nacl) {
deps += [ "//native_client_sdk/src/libraries/nacl_io" ]
}
}
foreach(fuzzer, fuzzers) {
fuzzer_test("boringssl_${fuzzer}_fuzzer") {
sources = [
"src/fuzz/${fuzzer}.cc",
]
deps = [
":boringssl_fuzzer",
]
seed_corpus = "src/fuzz/${fuzzer}_corpus"
if ("cert" == fuzzer) {
libfuzzer_options = [ "max_len=3072" ]
} else if ("client" == fuzzer) {
libfuzzer_options = [ "max_len=20000" ]
} else if ("pkcs8" == fuzzer) {
libfuzzer_options = [ "max_len=2048" ]
} else if ("privkey" == fuzzer) {
libfuzzer_options = [ "max_len=2048" ]
} else if ("read_pem" == fuzzer) {
libfuzzer_options = [ "max_len=512" ]
} else if ("server" == fuzzer) {
libfuzzer_options = [ "max_len=4096" ]
} else if ("spki" == fuzzer) {
libfuzzer_options = [ "max_len=1024" ]
}
}
} }
} }


@@ -2,7 +2,7 @@ Name: boringssl
 URL: https://boringssl.googlesource.com/boringssl
 Version: git
 License: BSDish
-License File: NOTICE
+License File: src/LICENSE
 License Android Compatible: yes
 Security Critical: yes
@@ -13,6 +13,7 @@ https://www.imperialviolet.org/2014/06/20/boringssl.html
 Note: when rolling DEPS forward, remember to run
   cd third_party/boringssl
-  python src/util/generate_build_files.py chromium
-from a system with both Perl and Go installed.
+  python src/util/generate_build_files.py gn gyp
+from a system with both Perl and Go installed. Alternatively, use the
+roll_boringssl.py script.


@ -3,21 +3,35 @@
# found in the LICENSE file. # found in the LICENSE file.
{ {
'includes': [
'boringssl.gypi',
],
'target_defaults': {
'conditions': [
['os_posix == 1', {
'cflags_c': [ '-std=c99' ],
'defines': [ '_XOPEN_SOURCE=700' ],
}],
],
},
'targets': [ 'targets': [
{ {
'target_name': 'boringssl', 'target_name': 'boringssl_nacl_win64',
'type': '<(component)', 'type': '<(component)',
'includes': [
'boringssl.gypi',
],
'sources': [ 'sources': [
'<@(boringssl_crypto_sources)', '<@(boringssl_crypto_sources)',
'<@(boringssl_ssl_sources)',
], ],
'defines': [ 'defines': [
'BORINGSSL_IMPLEMENTATION', 'BORINGSSL_IMPLEMENTATION',
'BORINGSSL_NO_STATIC_INITIALIZER', 'BORINGSSL_NO_STATIC_INITIALIZER',
'OPENSSL_NO_ASM',
'OPENSSL_SMALL',
], ],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
# TODO(davidben): Fix size_t truncations in BoringSSL. # TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039 # https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ], 'msvs_disabled_warnings': [ 4267, ],
@ -27,12 +41,88 @@
'BORINGSSL_SHARED_LIBRARY', 'BORINGSSL_SHARED_LIBRARY',
], ],
}], }],
],
'include_dirs': [
'src/include',
],
'direct_dependent_settings': {
'include_dirs': [
'src/include',
],
'conditions': [
['component == "shared_library"', {
'defines': [
'BORINGSSL_SHARED_LIBRARY',
],
}],
],
},
},
{
'target_name': 'boringssl',
'type': '<(component)',
'sources': [
'<@(boringssl_crypto_sources)',
'<@(boringssl_ssl_sources)',
],
'defines': [
'BORINGSSL_IMPLEMENTATION',
'BORINGSSL_NO_STATIC_INITIALIZER',
'OPENSSL_SMALL',
],
'dependencies': [ 'boringssl_asm' ],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
'conditions': [
['component == "shared_library"', {
'defines': [
'BORINGSSL_SHARED_LIBRARY',
],
}],
],
'include_dirs': [
'src/include',
],
'direct_dependent_settings': {
'include_dirs': [
'src/include',
],
'conditions': [
['component == "shared_library"', {
'defines': [
'BORINGSSL_SHARED_LIBRARY',
],
}],
],
},
},
{
# boringssl_asm is a separate target to allow for ASM-specific cflags.
'target_name': 'boringssl_asm',
'type': 'static_library',
'include_dirs': [
'src/include',
],
'conditions': [
['target_arch == "arm" and msan == 0', { ['target_arch == "arm" and msan == 0', {
'conditions': [ 'conditions': [
['OS == "linux" or OS == "android"', { ['OS == "linux" or OS == "android"', {
'sources': [ '<@(boringssl_linux_arm_sources)' ], 'sources': [ '<@(boringssl_linux_arm_sources)' ],
}, { }, {
'defines': [ 'OPENSSL_NO_ASM' ], 'direct_dependent_settings': {
'defines': [ 'OPENSSL_NO_ASM' ],
},
}],
],
}],
['target_arch == "arm" and clang == 1', {
# TODO(hans) Enable integrated-as (crbug.com/124610).
'cflags': [ '-fno-integrated-as' ],
'conditions': [
['OS == "android"', {
# Else /usr/bin/as gets picked up.
'cflags': [ '-B<(android_toolchain)' ],
}], }],
], ],
}], }],
@ -40,14 +130,19 @@
'conditions': [ 'conditions': [
['OS == "linux" or OS == "android"', { ['OS == "linux" or OS == "android"', {
'sources': [ '<@(boringssl_linux_aarch64_sources)' ], 'sources': [ '<@(boringssl_linux_aarch64_sources)' ],
# TODO(davidben): Remove explicit arch flag once
# https://crbug.com/576858 is fixed.
'cflags': [ '-march=armv8-a+crypto' ],
}, { }, {
'defines': [ 'OPENSSL_NO_ASM' ], 'direct_dependent_settings': {
'defines': [ 'OPENSSL_NO_ASM' ],
},
}], }],
], ],
}], }],
['target_arch == "ia32" and msan == 0', { ['target_arch == "ia32" and msan == 0', {
'conditions': [ 'conditions': [
['OS == "mac" or OS == "ios"', { ['OS == "mac"', {
'sources': [ '<@(boringssl_mac_x86_sources)' ], 'sources': [ '<@(boringssl_mac_x86_sources)' ],
}], }],
['OS == "linux" or OS == "android"', { ['OS == "linux" or OS == "android"', {
@ -64,14 +159,16 @@
'../yasm/yasm_compile.gypi', '../yasm/yasm_compile.gypi',
], ],
}], }],
['OS != "mac" and OS != "ios" and OS != "linux" and OS != "win" and OS != "android"', { ['OS != "mac" and OS != "linux" and OS != "win" and OS != "android"', {
'defines': [ 'OPENSSL_NO_ASM' ], 'direct_dependent_settings': {
'defines': [ 'OPENSSL_NO_ASM' ],
},
}], }],
] ]
}], }],
['target_arch == "x64" and msan == 0', { ['target_arch == "x64" and msan == 0', {
'conditions': [ 'conditions': [
['OS == "mac" or OS == "ios"', { ['OS == "mac"', {
'sources': [ '<@(boringssl_mac_x86_64_sources)' ], 'sources': [ '<@(boringssl_mac_x86_64_sources)' ],
}], }],
['OS == "linux" or OS == "android"', { ['OS == "linux" or OS == "android"', {
@ -88,34 +185,19 @@
'../yasm/yasm_compile.gypi', '../yasm/yasm_compile.gypi',
], ],
}], }],
['OS != "mac" and OS != "ios" and OS != "linux" and OS != "win" and OS != "android"', { ['OS != "mac" and OS != "linux" and OS != "win" and OS != "android"', {
'defines': [ 'OPENSSL_NO_ASM' ], 'direct_dependent_settings': {
'defines': [ 'OPENSSL_NO_ASM' ],
},
}], }],
] ]
}], }],
['msan == 1 or (target_arch != "arm" and target_arch != "ia32" and target_arch != "x64" and target_arch != "arm64")', { ['msan == 1 or (target_arch != "arm" and target_arch != "ia32" and target_arch != "x64" and target_arch != "arm64")', {
'defines': [ 'OPENSSL_NO_ASM' ], 'direct_dependent_settings': {
'defines': [ 'OPENSSL_NO_ASM' ],
},
}], }],
], ],
'include_dirs': [
'src/include',
# This is for arm_arch.h, which is needed by some asm files. Since the
# asm files are generated and kept in a different directory, they
# cannot use relative paths to find this file.
'src/crypto',
],
'direct_dependent_settings': {
'include_dirs': [
'src/include',
],
'conditions': [
['component == "shared_library"', {
'defines': [
'BORINGSSL_SHARED_LIBRARY',
],
}],
],
},
}, },
], ],
} }


@@ -1,4 +1,4 @@
-# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@ -7,33 +7,35 @@
{ {
'variables': { 'variables': {
'boringssl_ssl_sources': [ 'boringssl_ssl_sources': [
'src/ssl/custom_extensions.c',
'src/ssl/d1_both.c', 'src/ssl/d1_both.c',
'src/ssl/d1_clnt.c',
'src/ssl/d1_lib.c', 'src/ssl/d1_lib.c',
'src/ssl/d1_meth.c', 'src/ssl/d1_meth.c',
'src/ssl/d1_pkt.c', 'src/ssl/d1_pkt.c',
'src/ssl/d1_srtp.c', 'src/ssl/d1_srtp.c',
'src/ssl/d1_srvr.c', 'src/ssl/dtls_record.c',
'src/ssl/handshake_client.c',
'src/ssl/handshake_server.c',
'src/ssl/pqueue/pqueue.c', 'src/ssl/pqueue/pqueue.c',
'src/ssl/s3_both.c', 'src/ssl/s3_both.c',
'src/ssl/s3_clnt.c',
'src/ssl/s3_enc.c', 'src/ssl/s3_enc.c',
'src/ssl/s3_lib.c', 'src/ssl/s3_lib.c',
'src/ssl/s3_meth.c', 'src/ssl/s3_meth.c',
'src/ssl/s3_pkt.c', 'src/ssl/s3_pkt.c',
'src/ssl/s3_srvr.c',
'src/ssl/ssl_aead_ctx.c', 'src/ssl/ssl_aead_ctx.c',
'src/ssl/ssl_algs.c',
'src/ssl/ssl_asn1.c', 'src/ssl/ssl_asn1.c',
'src/ssl/ssl_buffer.c',
'src/ssl/ssl_cert.c', 'src/ssl/ssl_cert.c',
'src/ssl/ssl_cipher.c', 'src/ssl/ssl_cipher.c',
'src/ssl/ssl_ecdh.c',
'src/ssl/ssl_file.c',
'src/ssl/ssl_lib.c', 'src/ssl/ssl_lib.c',
'src/ssl/ssl_rsa.c', 'src/ssl/ssl_rsa.c',
'src/ssl/ssl_sess.c', 'src/ssl/ssl_session.c',
'src/ssl/ssl_stat.c', 'src/ssl/ssl_stat.c',
'src/ssl/ssl_txt.c',
'src/ssl/t1_enc.c', 'src/ssl/t1_enc.c',
'src/ssl/t1_lib.c', 'src/ssl/t1_lib.c',
'src/ssl/tls_record.c',
], ],
'boringssl_crypto_sources': [ 'boringssl_crypto_sources': [
'err_data.c', 'err_data.c',
@ -60,18 +62,14 @@
'src/crypto/asn1/asn1_lib.c', 'src/crypto/asn1/asn1_lib.c',
'src/crypto/asn1/asn1_par.c', 'src/crypto/asn1/asn1_par.c',
'src/crypto/asn1/asn_pack.c', 'src/crypto/asn1/asn_pack.c',
'src/crypto/asn1/bio_asn1.c',
'src/crypto/asn1/bio_ndef.c',
'src/crypto/asn1/f_enum.c', 'src/crypto/asn1/f_enum.c',
'src/crypto/asn1/f_int.c', 'src/crypto/asn1/f_int.c',
'src/crypto/asn1/f_string.c', 'src/crypto/asn1/f_string.c',
'src/crypto/asn1/t_bitst.c', 'src/crypto/asn1/t_bitst.c',
'src/crypto/asn1/t_pkey.c',
'src/crypto/asn1/tasn_dec.c', 'src/crypto/asn1/tasn_dec.c',
'src/crypto/asn1/tasn_enc.c', 'src/crypto/asn1/tasn_enc.c',
'src/crypto/asn1/tasn_fre.c', 'src/crypto/asn1/tasn_fre.c',
'src/crypto/asn1/tasn_new.c', 'src/crypto/asn1/tasn_new.c',
'src/crypto/asn1/tasn_prn.c',
'src/crypto/asn1/tasn_typ.c', 'src/crypto/asn1/tasn_typ.c',
'src/crypto/asn1/tasn_utl.c', 'src/crypto/asn1/tasn_utl.c',
'src/crypto/asn1/x_bignum.c', 'src/crypto/asn1/x_bignum.c',
@ -108,11 +106,11 @@
'src/crypto/bn/shift.c', 'src/crypto/bn/shift.c',
'src/crypto/bn/sqrt.c', 'src/crypto/bn/sqrt.c',
'src/crypto/buf/buf.c', 'src/crypto/buf/buf.c',
'src/crypto/bytestring/asn1_compat.c',
'src/crypto/bytestring/ber.c', 'src/crypto/bytestring/ber.c',
'src/crypto/bytestring/cbb.c', 'src/crypto/bytestring/cbb.c',
'src/crypto/bytestring/cbs.c', 'src/crypto/bytestring/cbs.c',
'src/crypto/chacha/chacha_generic.c', 'src/crypto/chacha/chacha.c',
'src/crypto/chacha/chacha_vec.c',
'src/crypto/cipher/aead.c', 'src/crypto/cipher/aead.c',
'src/crypto/cipher/cipher.c', 'src/crypto/cipher/cipher.c',
'src/crypto/cipher/derive_key.c', 'src/crypto/cipher/derive_key.c',
@ -127,28 +125,31 @@
'src/crypto/cipher/tls_cbc.c', 'src/crypto/cipher/tls_cbc.c',
'src/crypto/cmac/cmac.c', 'src/crypto/cmac/cmac.c',
'src/crypto/conf/conf.c', 'src/crypto/conf/conf.c',
'src/crypto/cpu-aarch64-linux.c',
'src/crypto/cpu-arm-linux.c',
'src/crypto/cpu-arm.c', 'src/crypto/cpu-arm.c',
'src/crypto/cpu-intel.c', 'src/crypto/cpu-intel.c',
'src/crypto/crypto.c', 'src/crypto/crypto.c',
'src/crypto/curve25519/curve25519.c',
'src/crypto/curve25519/spake25519.c',
'src/crypto/curve25519/x25519-x86_64.c',
'src/crypto/des/des.c', 'src/crypto/des/des.c',
'src/crypto/dh/check.c', 'src/crypto/dh/check.c',
'src/crypto/dh/dh.c', 'src/crypto/dh/dh.c',
'src/crypto/dh/dh_asn1.c', 'src/crypto/dh/dh_asn1.c',
'src/crypto/dh/dh_impl.c',
'src/crypto/dh/params.c', 'src/crypto/dh/params.c',
'src/crypto/digest/digest.c', 'src/crypto/digest/digest.c',
'src/crypto/digest/digests.c', 'src/crypto/digest/digests.c',
'src/crypto/directory_posix.c',
'src/crypto/directory_win.c',
'src/crypto/dsa/dsa.c', 'src/crypto/dsa/dsa.c',
'src/crypto/dsa/dsa_asn1.c', 'src/crypto/dsa/dsa_asn1.c',
'src/crypto/dsa/dsa_impl.c',
'src/crypto/ec/ec.c', 'src/crypto/ec/ec.c',
'src/crypto/ec/ec_asn1.c', 'src/crypto/ec/ec_asn1.c',
'src/crypto/ec/ec_key.c', 'src/crypto/ec/ec_key.c',
'src/crypto/ec/ec_montgomery.c', 'src/crypto/ec/ec_montgomery.c',
'src/crypto/ec/oct.c', 'src/crypto/ec/oct.c',
'src/crypto/ec/p224-64.c',
'src/crypto/ec/p256-64.c', 'src/crypto/ec/p256-64.c',
'src/crypto/ec/p256-x86_64.c',
'src/crypto/ec/simple.c', 'src/crypto/ec/simple.c',
'src/crypto/ec/util-64.c', 'src/crypto/ec/util-64.c',
'src/crypto/ec/wnaf.c', 'src/crypto/ec/wnaf.c',
@ -157,7 +158,6 @@
'src/crypto/ecdsa/ecdsa_asn1.c', 'src/crypto/ecdsa/ecdsa_asn1.c',
'src/crypto/engine/engine.c', 'src/crypto/engine/engine.c',
'src/crypto/err/err.c', 'src/crypto/err/err.c',
'src/crypto/evp/algorithm.c',
'src/crypto/evp/digestsign.c', 'src/crypto/evp/digestsign.c',
'src/crypto/evp/evp.c', 'src/crypto/evp/evp.c',
'src/crypto/evp/evp_asn1.c', 'src/crypto/evp/evp_asn1.c',
@ -168,6 +168,7 @@
'src/crypto/evp/p_rsa.c', 'src/crypto/evp/p_rsa.c',
'src/crypto/evp/p_rsa_asn1.c', 'src/crypto/evp/p_rsa_asn1.c',
'src/crypto/evp/pbkdf.c', 'src/crypto/evp/pbkdf.c',
'src/crypto/evp/print.c',
'src/crypto/evp/sign.c', 'src/crypto/evp/sign.c',
'src/crypto/ex_data.c', 'src/crypto/ex_data.c',
'src/crypto/hkdf/hkdf.c', 'src/crypto/hkdf/hkdf.c',
@ -181,6 +182,12 @@
'src/crypto/modes/ctr.c', 'src/crypto/modes/ctr.c',
'src/crypto/modes/gcm.c', 'src/crypto/modes/gcm.c',
'src/crypto/modes/ofb.c', 'src/crypto/modes/ofb.c',
'src/crypto/newhope/error_correction.c',
'src/crypto/newhope/newhope.c',
'src/crypto/newhope/ntt.c',
'src/crypto/newhope/poly.c',
'src/crypto/newhope/precomp.c',
'src/crypto/newhope/reduce.c',
'src/crypto/obj/obj.c', 'src/crypto/obj/obj.c',
'src/crypto/obj/obj_xref.c', 'src/crypto/obj/obj_xref.c',
'src/crypto/pem/pem_all.c', 'src/crypto/pem/pem_all.c',
@ -198,7 +205,7 @@
'src/crypto/poly1305/poly1305.c', 'src/crypto/poly1305/poly1305.c',
'src/crypto/poly1305/poly1305_arm.c', 'src/crypto/poly1305/poly1305_arm.c',
'src/crypto/poly1305/poly1305_vec.c', 'src/crypto/poly1305/poly1305_vec.c',
'src/crypto/rand/hwrand.c', 'src/crypto/rand/deterministic.c',
'src/crypto/rand/rand.c', 'src/crypto/rand/rand.c',
'src/crypto/rand/urandom.c', 'src/crypto/rand/urandom.c',
'src/crypto/rand/windows.c', 'src/crypto/rand/windows.c',
@ -223,11 +230,13 @@
'src/crypto/x509/a_sign.c', 'src/crypto/x509/a_sign.c',
'src/crypto/x509/a_strex.c', 'src/crypto/x509/a_strex.c',
'src/crypto/x509/a_verify.c', 'src/crypto/x509/a_verify.c',
'src/crypto/x509/algorithm.c',
'src/crypto/x509/asn1_gen.c', 'src/crypto/x509/asn1_gen.c',
'src/crypto/x509/by_dir.c', 'src/crypto/x509/by_dir.c',
'src/crypto/x509/by_file.c', 'src/crypto/x509/by_file.c',
'src/crypto/x509/i2d_pr.c', 'src/crypto/x509/i2d_pr.c',
'src/crypto/x509/pkcs7.c', 'src/crypto/x509/pkcs7.c',
'src/crypto/x509/rsa_pss.c',
'src/crypto/x509/t_crl.c', 'src/crypto/x509/t_crl.c',
'src/crypto/x509/t_req.c', 'src/crypto/x509/t_req.c',
'src/crypto/x509/t_x509.c', 'src/crypto/x509/t_x509.c',
@ -303,6 +312,8 @@
], ],
'boringssl_linux_aarch64_sources': [ 'boringssl_linux_aarch64_sources': [
'linux-aarch64/crypto/aes/aesv8-armx64.S', 'linux-aarch64/crypto/aes/aesv8-armx64.S',
'linux-aarch64/crypto/bn/armv8-mont.S',
'linux-aarch64/crypto/chacha/chacha-armv8.S',
'linux-aarch64/crypto/modes/ghashv8-armx64.S', 'linux-aarch64/crypto/modes/ghashv8-armx64.S',
'linux-aarch64/crypto/sha/sha1-armv8.S', 'linux-aarch64/crypto/sha/sha1-armv8.S',
'linux-aarch64/crypto/sha/sha256-armv8.S', 'linux-aarch64/crypto/sha/sha256-armv8.S',
@ -313,13 +324,13 @@
'linux-arm/crypto/aes/aesv8-armx32.S', 'linux-arm/crypto/aes/aesv8-armx32.S',
'linux-arm/crypto/aes/bsaes-armv7.S', 'linux-arm/crypto/aes/bsaes-armv7.S',
'linux-arm/crypto/bn/armv4-mont.S', 'linux-arm/crypto/bn/armv4-mont.S',
'linux-arm/crypto/chacha/chacha-armv4.S',
'linux-arm/crypto/modes/ghash-armv4.S', 'linux-arm/crypto/modes/ghash-armv4.S',
'linux-arm/crypto/modes/ghashv8-armx32.S', 'linux-arm/crypto/modes/ghashv8-armx32.S',
'linux-arm/crypto/sha/sha1-armv4-large.S', 'linux-arm/crypto/sha/sha1-armv4-large.S',
'linux-arm/crypto/sha/sha256-armv4.S', 'linux-arm/crypto/sha/sha256-armv4.S',
'linux-arm/crypto/sha/sha512-armv4.S', 'linux-arm/crypto/sha/sha512-armv4.S',
'src/crypto/chacha/chacha_vec_arm.S', 'src/crypto/curve25519/asm/x25519-asm-arm.S',
'src/crypto/cpu-arm-asm.S',
'src/crypto/poly1305/poly1305_arm_asm.S', 'src/crypto/poly1305/poly1305_arm_asm.S',
], ],
'boringssl_linux_x86_sources': [ 'boringssl_linux_x86_sources': [
@ -329,6 +340,7 @@
'linux-x86/crypto/bn/bn-586.S', 'linux-x86/crypto/bn/bn-586.S',
'linux-x86/crypto/bn/co-586.S', 'linux-x86/crypto/bn/co-586.S',
'linux-x86/crypto/bn/x86-mont.S', 'linux-x86/crypto/bn/x86-mont.S',
'linux-x86/crypto/chacha/chacha-x86.S',
'linux-x86/crypto/md5/md5-586.S', 'linux-x86/crypto/md5/md5-586.S',
'linux-x86/crypto/modes/ghash-x86.S', 'linux-x86/crypto/modes/ghash-x86.S',
'linux-x86/crypto/rc4/rc4-586.S', 'linux-x86/crypto/rc4/rc4-586.S',
@ -345,15 +357,17 @@
'linux-x86_64/crypto/bn/rsaz-x86_64.S', 'linux-x86_64/crypto/bn/rsaz-x86_64.S',
'linux-x86_64/crypto/bn/x86_64-mont.S', 'linux-x86_64/crypto/bn/x86_64-mont.S',
'linux-x86_64/crypto/bn/x86_64-mont5.S', 'linux-x86_64/crypto/bn/x86_64-mont5.S',
'linux-x86_64/crypto/chacha/chacha-x86_64.S',
'linux-x86_64/crypto/ec/p256-x86_64-asm.S',
'linux-x86_64/crypto/md5/md5-x86_64.S', 'linux-x86_64/crypto/md5/md5-x86_64.S',
'linux-x86_64/crypto/modes/aesni-gcm-x86_64.S', 'linux-x86_64/crypto/modes/aesni-gcm-x86_64.S',
'linux-x86_64/crypto/modes/ghash-x86_64.S', 'linux-x86_64/crypto/modes/ghash-x86_64.S',
'linux-x86_64/crypto/rand/rdrand-x86_64.S', 'linux-x86_64/crypto/rand/rdrand-x86_64.S',
'linux-x86_64/crypto/rc4/rc4-md5-x86_64.S',
'linux-x86_64/crypto/rc4/rc4-x86_64.S', 'linux-x86_64/crypto/rc4/rc4-x86_64.S',
'linux-x86_64/crypto/sha/sha1-x86_64.S', 'linux-x86_64/crypto/sha/sha1-x86_64.S',
'linux-x86_64/crypto/sha/sha256-x86_64.S', 'linux-x86_64/crypto/sha/sha256-x86_64.S',
'linux-x86_64/crypto/sha/sha512-x86_64.S', 'linux-x86_64/crypto/sha/sha512-x86_64.S',
'src/crypto/curve25519/asm/x25519-asm-x86_64.S',
], ],
'boringssl_mac_x86_sources': [ 'boringssl_mac_x86_sources': [
'mac-x86/crypto/aes/aes-586.S', 'mac-x86/crypto/aes/aes-586.S',
@ -362,6 +376,7 @@
'mac-x86/crypto/bn/bn-586.S', 'mac-x86/crypto/bn/bn-586.S',
'mac-x86/crypto/bn/co-586.S', 'mac-x86/crypto/bn/co-586.S',
'mac-x86/crypto/bn/x86-mont.S', 'mac-x86/crypto/bn/x86-mont.S',
'mac-x86/crypto/chacha/chacha-x86.S',
'mac-x86/crypto/md5/md5-586.S', 'mac-x86/crypto/md5/md5-586.S',
'mac-x86/crypto/modes/ghash-x86.S', 'mac-x86/crypto/modes/ghash-x86.S',
'mac-x86/crypto/rc4/rc4-586.S', 'mac-x86/crypto/rc4/rc4-586.S',
@ -378,15 +393,17 @@
'mac-x86_64/crypto/bn/rsaz-x86_64.S', 'mac-x86_64/crypto/bn/rsaz-x86_64.S',
'mac-x86_64/crypto/bn/x86_64-mont.S', 'mac-x86_64/crypto/bn/x86_64-mont.S',
'mac-x86_64/crypto/bn/x86_64-mont5.S', 'mac-x86_64/crypto/bn/x86_64-mont5.S',
'mac-x86_64/crypto/chacha/chacha-x86_64.S',
'mac-x86_64/crypto/ec/p256-x86_64-asm.S',
'mac-x86_64/crypto/md5/md5-x86_64.S', 'mac-x86_64/crypto/md5/md5-x86_64.S',
'mac-x86_64/crypto/modes/aesni-gcm-x86_64.S', 'mac-x86_64/crypto/modes/aesni-gcm-x86_64.S',
'mac-x86_64/crypto/modes/ghash-x86_64.S', 'mac-x86_64/crypto/modes/ghash-x86_64.S',
'mac-x86_64/crypto/rand/rdrand-x86_64.S', 'mac-x86_64/crypto/rand/rdrand-x86_64.S',
'mac-x86_64/crypto/rc4/rc4-md5-x86_64.S',
'mac-x86_64/crypto/rc4/rc4-x86_64.S', 'mac-x86_64/crypto/rc4/rc4-x86_64.S',
'mac-x86_64/crypto/sha/sha1-x86_64.S', 'mac-x86_64/crypto/sha/sha1-x86_64.S',
'mac-x86_64/crypto/sha/sha256-x86_64.S', 'mac-x86_64/crypto/sha/sha256-x86_64.S',
'mac-x86_64/crypto/sha/sha512-x86_64.S', 'mac-x86_64/crypto/sha/sha512-x86_64.S',
'src/crypto/curve25519/asm/x25519-asm-x86_64.S',
], ],
'boringssl_win_x86_sources': [ 'boringssl_win_x86_sources': [
'win-x86/crypto/aes/aes-586.asm', 'win-x86/crypto/aes/aes-586.asm',
@ -395,6 +412,7 @@
'win-x86/crypto/bn/bn-586.asm', 'win-x86/crypto/bn/bn-586.asm',
'win-x86/crypto/bn/co-586.asm', 'win-x86/crypto/bn/co-586.asm',
'win-x86/crypto/bn/x86-mont.asm', 'win-x86/crypto/bn/x86-mont.asm',
'win-x86/crypto/chacha/chacha-x86.asm',
'win-x86/crypto/md5/md5-586.asm', 'win-x86/crypto/md5/md5-586.asm',
'win-x86/crypto/modes/ghash-x86.asm', 'win-x86/crypto/modes/ghash-x86.asm',
'win-x86/crypto/rc4/rc4-586.asm', 'win-x86/crypto/rc4/rc4-586.asm',
@ -411,11 +429,12 @@
'win-x86_64/crypto/bn/rsaz-x86_64.asm', 'win-x86_64/crypto/bn/rsaz-x86_64.asm',
'win-x86_64/crypto/bn/x86_64-mont.asm', 'win-x86_64/crypto/bn/x86_64-mont.asm',
'win-x86_64/crypto/bn/x86_64-mont5.asm', 'win-x86_64/crypto/bn/x86_64-mont5.asm',
'win-x86_64/crypto/chacha/chacha-x86_64.asm',
'win-x86_64/crypto/ec/p256-x86_64-asm.asm',
'win-x86_64/crypto/md5/md5-x86_64.asm', 'win-x86_64/crypto/md5/md5-x86_64.asm',
'win-x86_64/crypto/modes/aesni-gcm-x86_64.asm', 'win-x86_64/crypto/modes/aesni-gcm-x86_64.asm',
'win-x86_64/crypto/modes/ghash-x86_64.asm', 'win-x86_64/crypto/modes/ghash-x86_64.asm',
'win-x86_64/crypto/rand/rdrand-x86_64.asm', 'win-x86_64/crypto/rand/rdrand-x86_64.asm',
'win-x86_64/crypto/rc4/rc4-md5-x86_64.asm',
'win-x86_64/crypto/rc4/rc4-x86_64.asm', 'win-x86_64/crypto/rc4/rc4-x86_64.asm',
'win-x86_64/crypto/sha/sha1-x86_64.asm', 'win-x86_64/crypto/sha/sha1-x86_64.asm',
'win-x86_64/crypto/sha/sha256-x86_64.asm', 'win-x86_64/crypto/sha/sha256-x86_64.asm',


@@ -31,10 +31,6 @@
       ],
       'include_dirs': [
         'src/include',
-        # This is for arm_arch.h, which is needed by some asm files. Since the
-        # asm files are generated and kept in a different directory, they
-        # cannot use relative paths to find this file.
-        'src/crypto',
       ],
       'direct_dependent_settings': {
         'include_dirs': [


@ -6,20 +6,24 @@
'includes': [ 'includes': [
'boringssl_tests.gypi', 'boringssl_tests.gypi',
], ],
'targets': [ 'conditions': [
{ ['OS!="ios"', {
'target_name': 'boringssl_unittests', 'targets': [
'type': 'executable', {
'sources': [ 'target_name': 'boringssl_unittests',
'boringssl_unittest.cc', 'type': 'executable',
], 'sources': [
'dependencies': [ 'boringssl_unittest.cc',
'<@(boringssl_test_targets)', ],
'../../base/base.gyp:base', 'dependencies': [
'../../base/base.gyp:run_all_unittests', '<@(boringssl_test_targets)',
'../../base/base.gyp:test_support_base', '../../base/base.gyp:base',
'../../testing/gtest.gyp:gtest', '../../base/base.gyp:run_all_unittests',
'../../base/base.gyp:test_support_base',
'../../testing/gtest.gyp:gtest',
],
},
], ],
}, }],
], ],
} }


@ -1,4 +1,4 @@
# Copyright (c) 2014 The Chromium Authors. All rights reserved. # Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
@ -20,6 +20,20 @@
# https://crbug.com/429039 # https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ], 'msvs_disabled_warnings': [ 4267, ],
}, },
{
'target_name': 'boringssl_asn1_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/asn1/asn1_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{ {
'target_name': 'boringssl_base64_test', 'target_name': 'boringssl_base64_test',
'type': 'executable', 'type': 'executable',
@ -76,6 +90,20 @@
# https://crbug.com/429039 # https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ], 'msvs_disabled_warnings': [ 4267, ],
}, },
{
'target_name': 'boringssl_chacha_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/chacha/chacha_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{ {
'target_name': 'boringssl_aead_test', 'target_name': 'boringssl_aead_test',
'type': 'executable', 'type': 'executable',
@ -132,6 +160,48 @@
# https://crbug.com/429039 # https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ], 'msvs_disabled_warnings': [ 4267, ],
}, },
{
'target_name': 'boringssl_ed25519_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/curve25519/ed25519_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'boringssl_spake25519_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/curve25519/spake25519_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'boringssl_x25519_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/curve25519/x25519_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{ {
'target_name': 'boringssl_dh_test', 'target_name': 'boringssl_dh_test',
'type': 'executable', 'type': 'executable',
@ -328,6 +398,62 @@
# https://crbug.com/429039 # https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ], 'msvs_disabled_warnings': [ 4267, ],
}, },
{
'target_name': 'boringssl_newhope_statistical_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/newhope/newhope_statistical_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'boringssl_newhope_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/newhope/newhope_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'boringssl_newhope_vectors_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/newhope/newhope_vectors_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{
'target_name': 'boringssl_obj_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/obj/obj_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{ {
'target_name': 'boringssl_pkcs12_test', 'target_name': 'boringssl_pkcs12_test',
'type': 'executable', 'type': 'executable',
@ -342,6 +468,20 @@
# https://crbug.com/429039 # https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ], 'msvs_disabled_warnings': [ 4267, ],
}, },
{
'target_name': 'boringssl_pkcs8_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/pkcs8/pkcs8_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{ {
'target_name': 'boringssl_poly1305_test', 'target_name': 'boringssl_poly1305_test',
'type': 'executable', 'type': 'executable',
@ -412,6 +552,20 @@
# https://crbug.com/429039 # https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ], 'msvs_disabled_warnings': [ 4267, ],
}, },
{
'target_name': 'boringssl_x509_test',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'src/crypto/x509/x509_test.cc',
'<@(boringssl_test_support_sources)',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},
{ {
'target_name': 'boringssl_tab_test', 'target_name': 'boringssl_tab_test',
'type': 'executable', 'type': 'executable',
@ -472,15 +626,25 @@
'variables': { 'variables': {
'boringssl_test_support_sources': [ 'boringssl_test_support_sources': [
'src/crypto/test/file_test.cc', 'src/crypto/test/file_test.cc',
'src/crypto/test/file_test.h',
'src/crypto/test/malloc.cc', 'src/crypto/test/malloc.cc',
'src/crypto/test/scoped_types.h',
'src/crypto/test/test_util.cc',
'src/crypto/test/test_util.h',
'src/ssl/test/async_bio.h',
'src/ssl/test/packeted_bio.h',
'src/ssl/test/scoped_types.h',
'src/ssl/test/test_config.h',
], ],
'boringssl_test_targets': [ 'boringssl_test_targets': [
'boringssl_aead_test', 'boringssl_aead_test',
'boringssl_aes_test', 'boringssl_aes_test',
'boringssl_asn1_test',
'boringssl_base64_test', 'boringssl_base64_test',
'boringssl_bio_test', 'boringssl_bio_test',
'boringssl_bn_test', 'boringssl_bn_test',
'boringssl_bytestring_test', 'boringssl_bytestring_test',
'boringssl_chacha_test',
'boringssl_cipher_test', 'boringssl_cipher_test',
'boringssl_cmac_test', 'boringssl_cmac_test',
'boringssl_constant_time_test', 'boringssl_constant_time_test',
@ -489,6 +653,7 @@
'boringssl_dsa_test', 'boringssl_dsa_test',
'boringssl_ec_test', 'boringssl_ec_test',
'boringssl_ecdsa_test', 'boringssl_ecdsa_test',
'boringssl_ed25519_test',
'boringssl_err_test', 'boringssl_err_test',
'boringssl_evp_extra_test', 'boringssl_evp_extra_test',
'boringssl_evp_test', 'boringssl_evp_test',
@ -497,17 +662,25 @@
'boringssl_hkdf_test', 'boringssl_hkdf_test',
'boringssl_hmac_test', 'boringssl_hmac_test',
'boringssl_lhash_test', 'boringssl_lhash_test',
'boringssl_newhope_statistical_test',
'boringssl_newhope_test',
'boringssl_newhope_vectors_test',
'boringssl_obj_test',
'boringssl_pbkdf_test', 'boringssl_pbkdf_test',
'boringssl_pkcs12_test', 'boringssl_pkcs12_test',
'boringssl_pkcs7_test', 'boringssl_pkcs7_test',
'boringssl_pkcs8_test',
'boringssl_poly1305_test', 'boringssl_poly1305_test',
'boringssl_pqueue_test', 'boringssl_pqueue_test',
'boringssl_refcount_test', 'boringssl_refcount_test',
'boringssl_rsa_test', 'boringssl_rsa_test',
'boringssl_spake25519_test',
'boringssl_ssl_test', 'boringssl_ssl_test',
'boringssl_tab_test', 'boringssl_tab_test',
'boringssl_thread_test', 'boringssl_thread_test',
'boringssl_v3name_test', 'boringssl_v3name_test',
'boringssl_x25519_test',
'boringssl_x509_test',
], ],
} }
} }


@ -161,6 +161,10 @@ TEST(BoringSSL, ByteString) {
TestSimple("bytestring_test"); TestSimple("bytestring_test");
} }
TEST(BoringSSL, ChaCha) {
TestSimple("chacha_test");
}
TEST(BoringSSL, Cipher) { TEST(BoringSSL, Cipher) {
base::FilePath data_file; base::FilePath data_file;
ASSERT_TRUE(CryptoCipherTestPath(&data_file)); ASSERT_TRUE(CryptoCipherTestPath(&data_file));
@ -200,6 +204,19 @@ TEST(BoringSSL, ECDSA) {
TestSimple("ecdsa_test"); TestSimple("ecdsa_test");
} }
TEST(BoringSSL, ED25519) {
base::FilePath data_file;
ASSERT_TRUE(BoringSSLPath(&data_file));
data_file = data_file.Append(FILE_PATH_LITERAL("crypto"));
data_file = data_file.Append(FILE_PATH_LITERAL("curve25519"));
data_file = data_file.Append(FILE_PATH_LITERAL("ed25519_tests.txt"));
std::vector<base::CommandLine::StringType> args;
args.push_back(data_file.value());
TestProcess("ed25519_test", args);
}
TEST(BoringSSL, ERR) { TEST(BoringSSL, ERR) {
TestSimple("err_test"); TestSimple("err_test");
} }
@ -250,6 +267,23 @@ TEST(BoringSSL, LH) {
TestSimple("lhash_test"); TestSimple("lhash_test");
} }
TEST(BoringSSL, NewHope) {
TestSimple("newhope_test");
}
TEST(BoringSSL, NewHopeVectors) {
base::FilePath data_file;
ASSERT_TRUE(BoringSSLPath(&data_file));
data_file = data_file.Append(FILE_PATH_LITERAL("crypto"));
data_file = data_file.Append(FILE_PATH_LITERAL("newhope"));
data_file = data_file.Append(FILE_PATH_LITERAL("newhope_test.txt"));
std::vector<base::CommandLine::StringType> args;
args.push_back(data_file.value());
TestProcess("newhope_vectors_test", args);
}
TEST(BoringSSL, PBKDF) { TEST(BoringSSL, PBKDF) {
TestSimple("pbkdf_test"); TestSimple("pbkdf_test");
} }
@ -271,6 +305,10 @@ TEST(BoringSSL, PKCS7) {
TestSimple("pkcs7_test"); TestSimple("pkcs7_test");
} }
TEST(BoringSSL, PKCS8) {
TestSimple("pkcs8_test");
}
TEST(BoringSSL, PKCS12) { TEST(BoringSSL, PKCS12) {
TestSimple("pkcs12_test"); TestSimple("pkcs12_test");
} }
@ -302,3 +340,11 @@ TEST(BoringSSL, Thread) {
TEST(BoringSSL, V3NameTest) { TEST(BoringSSL, V3NameTest) {
TestSimple("v3name_test"); TestSimple("v3name_test");
} }
TEST(BoringSSL, X25519) {
TestSimple("x25519_test");
}
TEST(BoringSSL, X509) {
TestSimple("x509_test");
}

File diff suppressed because it is too large


@ -1,5 +1,5 @@
#if defined(__aarch64__) #if defined(__aarch64__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7 #if __ARM_MAX_ARCH__>=7
.text .text
@ -13,6 +13,7 @@
.long 0x1b,0x1b,0x1b,0x1b .long 0x1b,0x1b,0x1b,0x1b
.globl aes_v8_set_encrypt_key .globl aes_v8_set_encrypt_key
.hidden aes_v8_set_encrypt_key
.type aes_v8_set_encrypt_key,%function .type aes_v8_set_encrypt_key,%function
.align 5 .align 5
aes_v8_set_encrypt_key: aes_v8_set_encrypt_key:
@ -180,6 +181,7 @@ aes_v8_set_encrypt_key:
.size aes_v8_set_encrypt_key,.-aes_v8_set_encrypt_key .size aes_v8_set_encrypt_key,.-aes_v8_set_encrypt_key
.globl aes_v8_set_decrypt_key .globl aes_v8_set_decrypt_key
.hidden aes_v8_set_decrypt_key
.type aes_v8_set_decrypt_key,%function .type aes_v8_set_decrypt_key,%function
.align 5 .align 5
aes_v8_set_decrypt_key: aes_v8_set_decrypt_key:
@ -219,6 +221,7 @@ aes_v8_set_decrypt_key:
ret ret
.size aes_v8_set_decrypt_key,.-aes_v8_set_decrypt_key .size aes_v8_set_decrypt_key,.-aes_v8_set_decrypt_key
.globl aes_v8_encrypt .globl aes_v8_encrypt
.hidden aes_v8_encrypt
.type aes_v8_encrypt,%function .type aes_v8_encrypt,%function
.align 5 .align 5
aes_v8_encrypt: aes_v8_encrypt:
@ -248,6 +251,7 @@ aes_v8_encrypt:
ret ret
.size aes_v8_encrypt,.-aes_v8_encrypt .size aes_v8_encrypt,.-aes_v8_encrypt
.globl aes_v8_decrypt .globl aes_v8_decrypt
.hidden aes_v8_decrypt
.type aes_v8_decrypt,%function .type aes_v8_decrypt,%function
.align 5 .align 5
aes_v8_decrypt: aes_v8_decrypt:
@ -277,6 +281,7 @@ aes_v8_decrypt:
ret ret
.size aes_v8_decrypt,.-aes_v8_decrypt .size aes_v8_decrypt,.-aes_v8_decrypt
.globl aes_v8_cbc_encrypt .globl aes_v8_cbc_encrypt
.hidden aes_v8_cbc_encrypt
.type aes_v8_cbc_encrypt,%function .type aes_v8_cbc_encrypt,%function
.align 5 .align 5
aes_v8_cbc_encrypt: aes_v8_cbc_encrypt:
@ -567,6 +572,7 @@ aes_v8_cbc_encrypt:
ret ret
.size aes_v8_cbc_encrypt,.-aes_v8_cbc_encrypt .size aes_v8_cbc_encrypt,.-aes_v8_cbc_encrypt
.globl aes_v8_ctr32_encrypt_blocks .globl aes_v8_ctr32_encrypt_blocks
.hidden aes_v8_ctr32_encrypt_blocks
.type aes_v8_ctr32_encrypt_blocks,%function .type aes_v8_ctr32_encrypt_blocks,%function
.align 5 .align 5
aes_v8_ctr32_encrypt_blocks: aes_v8_ctr32_encrypt_blocks:
@ -748,4 +754,4 @@ aes_v8_ctr32_encrypt_blocks:
ret ret
.size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks .size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks
#endif #endif
#endif #endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,11 +1,12 @@
#if defined(__aarch64__) #if defined(__aarch64__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
.text .text
#if !defined(__clang__) #if !defined(__clang__)
.arch armv8-a+crypto .arch armv8-a+crypto
#endif #endif
.globl gcm_init_v8 .globl gcm_init_v8
.hidden gcm_init_v8
.type gcm_init_v8,%function .type gcm_init_v8,%function
.align 4 .align 4
gcm_init_v8: gcm_init_v8:
@ -56,6 +57,7 @@ gcm_init_v8:
ret ret
.size gcm_init_v8,.-gcm_init_v8 .size gcm_init_v8,.-gcm_init_v8
.globl gcm_gmult_v8 .globl gcm_gmult_v8
.hidden gcm_gmult_v8
.type gcm_gmult_v8,%function .type gcm_gmult_v8,%function
.align 4 .align 4
gcm_gmult_v8: gcm_gmult_v8:
@ -68,10 +70,10 @@ gcm_gmult_v8:
#endif #endif
ext v3.16b,v17.16b,v17.16b,#8 ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.loˇXi.lo pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hiˇXi.hi pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)ˇ(Xi.lo+Xi.hi) pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b eor v18.16b,v0.16b,v2.16b
@ -97,6 +99,7 @@ gcm_gmult_v8:
ret ret
.size gcm_gmult_v8,.-gcm_gmult_v8 .size gcm_gmult_v8,.-gcm_gmult_v8
.globl gcm_ghash_v8 .globl gcm_ghash_v8
.hidden gcm_ghash_v8
.type gcm_ghash_v8,%function .type gcm_ghash_v8,%function
.align 4 .align 4
gcm_ghash_v8: gcm_ghash_v8:
@ -135,7 +138,7 @@ gcm_ghash_v8:
#endif #endif
ext v7.16b,v17.16b,v17.16b,#8 ext v7.16b,v17.16b,v17.16b,#8
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
pmull v4.1q,v20.1d,v7.1d //HˇIi+1 pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
pmull2 v6.1q,v20.2d,v7.2d pmull2 v6.1q,v20.2d,v7.2d
b .Loop_mod2x_v8 b .Loop_mod2x_v8
@ -144,14 +147,14 @@ gcm_ghash_v8:
.Loop_mod2x_v8: .Loop_mod2x_v8:
ext v18.16b,v3.16b,v3.16b,#8 ext v18.16b,v3.16b,v3.16b,#8
subs x3,x3,#32 //is there more data? subs x3,x3,#32 //is there more data?
pmull v0.1q,v22.1d,v3.1d //H^2.loˇXi.lo pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
csel x12,xzr,x12,lo //is it time to zero x12? csel x12,xzr,x12,lo //is it time to zero x12?
pmull v5.1q,v21.1d,v17.1d pmull v5.1q,v21.1d,v17.1d
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v22.2d,v3.2d //H^2.hiˇXi.hi pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
eor v0.16b,v0.16b,v4.16b //accumulate eor v0.16b,v0.16b,v4.16b //accumulate
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)ˇ(Xi.lo+Xi.hi) pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2] ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
eor v2.16b,v2.16b,v6.16b eor v2.16b,v2.16b,v6.16b
@ -176,7 +179,7 @@ gcm_ghash_v8:
ext v7.16b,v17.16b,v17.16b,#8 ext v7.16b,v17.16b,v17.16b,#8
ext v3.16b,v16.16b,v16.16b,#8 ext v3.16b,v16.16b,v16.16b,#8
eor v0.16b,v1.16b,v18.16b eor v0.16b,v1.16b,v18.16b
pmull v4.1q,v20.1d,v7.1d //HˇIi+1 pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
@ -197,10 +200,10 @@ gcm_ghash_v8:
eor v3.16b,v3.16b,v0.16b //inp^=Xi eor v3.16b,v3.16b,v0.16b //inp^=Xi
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
pmull v0.1q,v20.1d,v3.1d //H.loˇXi.lo pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hiˇXi.hi pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)ˇ(Xi.lo+Xi.hi) pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b eor v18.16b,v0.16b,v2.16b
@ -229,4 +232,4 @@ gcm_ghash_v8:
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2 .align 2
.align 2 .align 2
#endif #endif


@ -1,10 +1,11 @@
#if defined(__aarch64__) #if defined(__aarch64__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
.text .text
.globl sha1_block_data_order .globl sha1_block_data_order
.hidden sha1_block_data_order
.type sha1_block_data_order,%function .type sha1_block_data_order,%function
.align 6 .align 6
sha1_block_data_order: sha1_block_data_order:
@ -1212,4 +1213,4 @@ sha1_block_armv8:
.align 2 .align 2
.align 2 .align 2
.comm OPENSSL_armcap_P,4,4 .comm OPENSSL_armcap_P,4,4
#endif #endif


@ -1,10 +1,11 @@
#if defined(__aarch64__) #if defined(__aarch64__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
.text .text
.globl sha256_block_data_order .globl sha256_block_data_order
.hidden sha256_block_data_order
.type sha256_block_data_order,%function .type sha256_block_data_order,%function
.align 6 .align 6
sha256_block_data_order: sha256_block_data_order:
@ -1142,4 +1143,4 @@ sha256_block_armv8:
ret ret
.size sha256_block_armv8,.-sha256_block_armv8 .size sha256_block_armv8,.-sha256_block_armv8
.comm OPENSSL_armcap_P,4,4 .comm OPENSSL_armcap_P,4,4
#endif #endif


@ -1,10 +1,11 @@
#if defined(__aarch64__) #if defined(__aarch64__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
.text .text
.globl sha512_block_data_order .globl sha512_block_data_order
.hidden sha512_block_data_order
.type sha512_block_data_order,%function .type sha512_block_data_order,%function
.align 6 .align 6
sha512_block_data_order: sha512_block_data_order:
@ -1022,4 +1023,4 @@ sha512_block_data_order:
.align 2 .align 2
.align 2 .align 2
.comm OPENSSL_armcap_P,4,4 .comm OPENSSL_armcap_P,4,4
#endif #endif


@ -34,7 +34,7 @@
#if defined(__arm__) #if defined(__arm__)
#ifndef __KERNEL__ #ifndef __KERNEL__
# include "arm_arch.h" # include <openssl/arm_arch.h>
#else #else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__ # define __ARM_ARCH__ __LINUX_ARM_ARCH__
#endif #endif
@ -1197,4 +1197,4 @@ _armv4_AES_decrypt:
.align 2 .align 2
#endif #endif
#endif #endif


@ -1,5 +1,5 @@
#if defined(__arm__) #if defined(__arm__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7 #if __ARM_MAX_ARCH__>=7
.text .text
@ -13,6 +13,7 @@
.long 0x1b,0x1b,0x1b,0x1b .long 0x1b,0x1b,0x1b,0x1b
.globl aes_v8_set_encrypt_key .globl aes_v8_set_encrypt_key
.hidden aes_v8_set_encrypt_key
.type aes_v8_set_encrypt_key,%function .type aes_v8_set_encrypt_key,%function
.align 5 .align 5
aes_v8_set_encrypt_key: aes_v8_set_encrypt_key:
@ -183,6 +184,7 @@ aes_v8_set_encrypt_key:
.size aes_v8_set_encrypt_key,.-aes_v8_set_encrypt_key .size aes_v8_set_encrypt_key,.-aes_v8_set_encrypt_key
.globl aes_v8_set_decrypt_key .globl aes_v8_set_decrypt_key
.hidden aes_v8_set_decrypt_key
.type aes_v8_set_decrypt_key,%function .type aes_v8_set_decrypt_key,%function
.align 5 .align 5
aes_v8_set_decrypt_key: aes_v8_set_decrypt_key:
@ -220,6 +222,7 @@ aes_v8_set_decrypt_key:
ldmia sp!,{r4,pc} ldmia sp!,{r4,pc}
.size aes_v8_set_decrypt_key,.-aes_v8_set_decrypt_key .size aes_v8_set_decrypt_key,.-aes_v8_set_decrypt_key
.globl aes_v8_encrypt .globl aes_v8_encrypt
.hidden aes_v8_encrypt
.type aes_v8_encrypt,%function .type aes_v8_encrypt,%function
.align 5 .align 5
aes_v8_encrypt: aes_v8_encrypt:
@ -249,6 +252,7 @@ aes_v8_encrypt:
bx lr bx lr
.size aes_v8_encrypt,.-aes_v8_encrypt .size aes_v8_encrypt,.-aes_v8_encrypt
.globl aes_v8_decrypt .globl aes_v8_decrypt
.hidden aes_v8_decrypt
.type aes_v8_decrypt,%function .type aes_v8_decrypt,%function
.align 5 .align 5
aes_v8_decrypt: aes_v8_decrypt:
@ -278,6 +282,7 @@ aes_v8_decrypt:
bx lr bx lr
.size aes_v8_decrypt,.-aes_v8_decrypt .size aes_v8_decrypt,.-aes_v8_decrypt
.globl aes_v8_cbc_encrypt .globl aes_v8_cbc_encrypt
.hidden aes_v8_cbc_encrypt
.type aes_v8_cbc_encrypt,%function .type aes_v8_cbc_encrypt,%function
.align 5 .align 5
aes_v8_cbc_encrypt: aes_v8_cbc_encrypt:
@ -570,6 +575,7 @@ aes_v8_cbc_encrypt:
ldmia sp!,{r4,r5,r6,r7,r8,pc} ldmia sp!,{r4,r5,r6,r7,r8,pc}
.size aes_v8_cbc_encrypt,.-aes_v8_cbc_encrypt .size aes_v8_cbc_encrypt,.-aes_v8_cbc_encrypt
.globl aes_v8_ctr32_encrypt_blocks .globl aes_v8_ctr32_encrypt_blocks
.hidden aes_v8_ctr32_encrypt_blocks
.type aes_v8_ctr32_encrypt_blocks,%function .type aes_v8_ctr32_encrypt_blocks,%function
.align 5 .align 5
aes_v8_ctr32_encrypt_blocks: aes_v8_ctr32_encrypt_blocks:
@ -753,4 +759,4 @@ aes_v8_ctr32_encrypt_blocks:
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
.size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks .size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks
#endif #endif
#endif #endif


@ -47,9 +47,8 @@
@ @
@ <ard.biesheuvel@linaro.org> @ <ard.biesheuvel@linaro.org>
#if defined(__arm__)
#ifndef __KERNEL__ #ifndef __KERNEL__
# include "arm_arch.h" # include <openssl/arm_arch.h>
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15}
@ -2576,4 +2575,3 @@ bsaes_xts_decrypt:
.size bsaes_xts_decrypt,.-bsaes_xts_decrypt .size bsaes_xts_decrypt,.-bsaes_xts_decrypt
#endif #endif
#endif #endif
#endif


@ -1,5 +1,5 @@
#if defined(__arm__) #if defined(__arm__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
.text .text
.code 32 .code 32
@ -28,7 +28,7 @@ bn_mul_mont:
#ifdef __APPLE__ #ifdef __APPLE__
ldr r0,[r0] ldr r0,[r0]
#endif #endif
tst r0,#1 @ NEON available? tst r0,#ARMV7_NEON @ NEON available?
ldmia sp, {r0,r2} ldmia sp, {r0,r2}
beq .Lialu beq .Lialu
add sp,sp,#8 add sp,sp,#8
@ -586,4 +586,4 @@ bn_mul8x_mont_neon:
.comm OPENSSL_armcap_P,4,4 .comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P .hidden OPENSSL_armcap_P
#endif #endif
#endif #endif

File diff suppressed because it is too large


@ -1,13 +1,12 @@
#if defined(__arm__) #if defined(__arm__)
#if defined(__arm__) #include <openssl/arm_arch.h>
#include "arm_arch.h"
.syntax unified .syntax unified
.text .text
.code 32 .code 32
#ifdef __APPLE__ #ifdef __clang__
#define ldrplb ldrbpl #define ldrplb ldrbpl
#define ldrneb ldrbne #define ldrneb ldrbne
#endif #endif
@ -536,6 +535,4 @@ gcm_ghash_neon:
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2 .align 2
.align 2 .align 2
#endif #endif
#endif


@ -1,10 +1,11 @@
#if defined(__arm__) #if defined(__arm__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
.text .text
.fpu neon .fpu neon
.code 32 .code 32
.globl gcm_init_v8 .globl gcm_init_v8
.hidden gcm_init_v8
.type gcm_init_v8,%function .type gcm_init_v8,%function
.align 4 .align 4
gcm_init_v8: gcm_init_v8:
@ -55,6 +56,7 @@ gcm_init_v8:
bx lr bx lr
.size gcm_init_v8,.-gcm_init_v8 .size gcm_init_v8,.-gcm_init_v8
.globl gcm_gmult_v8 .globl gcm_gmult_v8
.hidden gcm_gmult_v8
.type gcm_gmult_v8,%function .type gcm_gmult_v8,%function
.align 4 .align 4
gcm_gmult_v8: gcm_gmult_v8:
@ -67,10 +69,10 @@ gcm_gmult_v8:
#endif #endif
vext.8 q3,q9,q9,#8 vext.8 q3,q9,q9,#8
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2 veor q10,q0,q2
@ -96,6 +98,7 @@ gcm_gmult_v8:
bx lr bx lr
.size gcm_gmult_v8,.-gcm_gmult_v8 .size gcm_gmult_v8,.-gcm_gmult_v8
.globl gcm_ghash_v8 .globl gcm_ghash_v8
.hidden gcm_ghash_v8
.type gcm_ghash_v8,%function .type gcm_ghash_v8,%function
.align 4 .align 4
gcm_ghash_v8: gcm_ghash_v8:
@ -135,7 +138,7 @@ gcm_ghash_v8:
#endif #endif
vext.8 q7,q9,q9,#8 vext.8 q7,q9,q9,#8
veor q3,q3,q0 @ I[i]^=Xi veor q3,q3,q0 @ I[i]^=Xi
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q9,q9,q7 @ Karatsuba pre-processing veor q9,q9,q7 @ Karatsuba pre-processing
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
b .Loop_mod2x_v8 b .Loop_mod2x_v8
@ -144,14 +147,14 @@ gcm_ghash_v8:
.Loop_mod2x_v8: .Loop_mod2x_v8:
vext.8 q10,q3,q3,#8 vext.8 q10,q3,q3,#8
subs r3,r3,#32 @ is there more data? subs r3,r3,#32 @ is there more data?
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
movlo r12,#0 @ is it time to zero r12? movlo r12,#0 @ is it time to zero r12?
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
veor q10,q10,q3 @ Karatsuba pre-processing veor q10,q10,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
veor q0,q0,q4 @ accumulate veor q0,q0,q4 @ accumulate
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) .byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
veor q2,q2,q6 veor q2,q2,q6
@ -176,7 +179,7 @@ gcm_ghash_v8:
vext.8 q7,q9,q9,#8 vext.8 q7,q9,q9,#8
vext.8 q3,q8,q8,#8 vext.8 q3,q8,q8,#8
veor q0,q1,q10 veor q0,q1,q10
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q3,q3,q2 @ accumulate q3 early veor q3,q3,q2 @ accumulate q3 early
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
@ -197,10 +200,10 @@ gcm_ghash_v8:
veor q3,q3,q0 @ inp^=Xi veor q3,q3,q0 @ inp^=Xi
veor q9,q8,q10 @ q9 is rotated inp^Xi veor q9,q8,q10 @ q9 is rotated inp^Xi
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2 veor q10,q0,q2
@ -230,4 +233,4 @@ gcm_ghash_v8:
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2 .align 2
.align 2 .align 2
#endif #endif


@ -1,10 +1,11 @@
#if defined(__arm__) #if defined(__arm__)
#include "arm_arch.h" #include <openssl/arm_arch.h>
.text .text
.code 32 .code 32
.globl sha1_block_data_order .globl sha1_block_data_order
.hidden sha1_block_data_order
.type sha1_block_data_order,%function .type sha1_block_data_order,%function
.align 5 .align 5
@ -1459,4 +1460,4 @@ sha1_block_data_order_armv8:
.comm OPENSSL_armcap_P,4,4 .comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P .hidden OPENSSL_armcap_P
#endif #endif
#endif #endif


@ -38,7 +38,7 @@
@ Add ARMv8 code path performing at 2.0 cpb on Apple A7. @ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
#ifndef __KERNEL__ #ifndef __KERNEL__
# include "arm_arch.h" # include <openssl/arm_arch.h>
#else #else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__ # define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7 # define __ARM_MAX_ARCH__ 7
@ -85,6 +85,7 @@ K256:
.align 5 .align 5
.globl sha256_block_data_order .globl sha256_block_data_order
.hidden sha256_block_data_order
.type sha256_block_data_order,%function .type sha256_block_data_order,%function
sha256_block_data_order: sha256_block_data_order:
.Lsha256_block_data_order: .Lsha256_block_data_order:
@ -1875,6 +1876,7 @@ sha256_block_data_order:
.fpu neon .fpu neon
.globl sha256_block_data_order_neon .globl sha256_block_data_order_neon
.hidden sha256_block_data_order_neon
.type sha256_block_data_order_neon,%function .type sha256_block_data_order_neon,%function
.align 4 .align 4
sha256_block_data_order_neon: sha256_block_data_order_neon:
@ -2815,4 +2817,4 @@ sha256_block_data_order_armv8:
.comm OPENSSL_armcap_P,4,4 .comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P .hidden OPENSSL_armcap_P
#endif #endif
#endif #endif


@ -47,7 +47,7 @@
@ was reflected in below two parameters as 0 and 4. Now caller is @ was reflected in below two parameters as 0 and 4. Now caller is
@ expected to maintain native byte order for whole 64-bit values. @ expected to maintain native byte order for whole 64-bit values.
#ifndef __KERNEL__ #ifndef __KERNEL__
# include "arm_arch.h" # include <openssl/arm_arch.h>
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} # define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15} # define VFP_ABI_POP vldmia sp!,{d8-d15}
#else #else
@ -133,6 +133,7 @@ K512:
#endif #endif
.globl sha512_block_data_order .globl sha512_block_data_order
.hidden sha512_block_data_order
.type sha512_block_data_order,%function .type sha512_block_data_order,%function
sha512_block_data_order: sha512_block_data_order:
.Lsha512_block_data_order: .Lsha512_block_data_order:
@ -147,7 +148,7 @@ sha512_block_data_order:
#ifdef __APPLE__ #ifdef __APPLE__
ldr r12,[r12] ldr r12,[r12]
#endif #endif
tst r12,#1 tst r12,#ARMV7_NEON
bne .LNEON bne .LNEON
#endif #endif
add r2,r1,r2,lsl#7 @ len to point at the end of inp add r2,r1,r2,lsl#7 @ len to point at the end of inp
@ -533,6 +534,7 @@ sha512_block_data_order:
.fpu neon .fpu neon
.globl sha512_block_data_order_neon .globl sha512_block_data_order_neon
.hidden sha512_block_data_order_neon
.type sha512_block_data_order_neon,%function .type sha512_block_data_order_neon,%function
.align 4 .align 4
sha512_block_data_order_neon: sha512_block_data_order_neon:
@ -1866,4 +1868,4 @@ sha512_block_data_order_neon:
.comm OPENSSL_armcap_P,4,4 .comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P .hidden OPENSSL_armcap_P
#endif #endif
#endif #endif


@ -0,0 +1,969 @@
#if defined(__i386__)
.file "chacha-x86.S"
.text
.globl ChaCha20_ctr32
.hidden ChaCha20_ctr32
.type ChaCha20_ctr32,@function
.align 16
ChaCha20_ctr32:
.L_ChaCha20_ctr32_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
cmpl 28(%esp),%eax
je .L000no_data
call .Lpic_point
.Lpic_point:
popl %eax
leal OPENSSL_ia32cap_P-.Lpic_point(%eax),%ebp
testl $16777216,(%ebp)
jz .L001x86
testl $512,4(%ebp)
jz .L001x86
jmp .Lssse3_shortcut
.L001x86:
movl 32(%esp),%esi
movl 36(%esp),%edi
subl $132,%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
movl %eax,80(%esp)
movl %ebx,84(%esp)
movl %ecx,88(%esp)
movl %edx,92(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
movl %eax,96(%esp)
movl %ebx,100(%esp)
movl %ecx,104(%esp)
movl %edx,108(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
subl $1,%eax
movl %eax,112(%esp)
movl %ebx,116(%esp)
movl %ecx,120(%esp)
movl %edx,124(%esp)
jmp .L002entry
.align 16
.L003outer_loop:
movl %ebx,156(%esp)
movl %eax,152(%esp)
movl %ecx,160(%esp)
.L002entry:
movl $1634760805,%eax
movl $857760878,4(%esp)
movl $2036477234,8(%esp)
movl $1797285236,12(%esp)
movl 84(%esp),%ebx
movl 88(%esp),%ebp
movl 104(%esp),%ecx
movl 108(%esp),%esi
movl 116(%esp),%edx
movl 120(%esp),%edi
movl %ebx,20(%esp)
movl %ebp,24(%esp)
movl %ecx,40(%esp)
movl %esi,44(%esp)
movl %edx,52(%esp)
movl %edi,56(%esp)
movl 92(%esp),%ebx
movl 124(%esp),%edi
movl 112(%esp),%edx
movl 80(%esp),%ebp
movl 96(%esp),%ecx
movl 100(%esp),%esi
addl $1,%edx
movl %ebx,28(%esp)
movl %edi,60(%esp)
movl %edx,112(%esp)
movl $10,%ebx
jmp .L004loop
.align 16
.L004loop:
addl %ebp,%eax
movl %ebx,128(%esp)
movl %ebp,%ebx
xorl %eax,%edx
roll $16,%edx
addl %edx,%ecx
xorl %ecx,%ebx
movl 52(%esp),%edi
roll $12,%ebx
movl 20(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,48(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,32(%esp)
roll $16,%edi
movl %ebx,16(%esp)
addl %edi,%esi
movl 40(%esp),%ecx
xorl %esi,%ebp
movl 56(%esp),%edx
roll $12,%ebp
movl 24(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,52(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,36(%esp)
roll $16,%edx
movl %ebp,20(%esp)
addl %edx,%ecx
movl 44(%esp),%esi
xorl %ecx,%ebx
movl 60(%esp),%edi
roll $12,%ebx
movl 28(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,56(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,24(%esp)
addl %edi,%esi
xorl %esi,%ebp
roll $12,%ebp
movl 20(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,%edx
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
roll $16,%edx
movl %ebp,28(%esp)
addl %edx,%ecx
xorl %ecx,%ebx
movl 48(%esp),%edi
roll $12,%ebx
movl 24(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,60(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,40(%esp)
roll $16,%edi
movl %ebx,20(%esp)
addl %edi,%esi
movl 32(%esp),%ecx
xorl %esi,%ebp
movl 52(%esp),%edx
roll $12,%ebp
movl 28(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,48(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,44(%esp)
roll $16,%edx
movl %ebp,24(%esp)
addl %edx,%ecx
movl 36(%esp),%esi
xorl %ecx,%ebx
movl 56(%esp),%edi
roll $12,%ebx
movl 16(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,52(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,28(%esp)
addl %edi,%esi
xorl %esi,%ebp
movl 48(%esp),%edx
roll $12,%ebp
movl 128(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,56(%esp)
xorl %esi,%ebp
roll $7,%ebp
decl %ebx
jnz .L004loop
movl 160(%esp),%ebx
addl $1634760805,%eax
addl 80(%esp),%ebp
addl 96(%esp),%ecx
addl 100(%esp),%esi
cmpl $64,%ebx
jb .L005tail
movl 156(%esp),%ebx
addl 112(%esp),%edx
addl 120(%esp),%edi
xorl (%ebx),%eax
xorl 16(%ebx),%ebp
movl %eax,(%esp)
movl 152(%esp),%eax
xorl 32(%ebx),%ecx
xorl 36(%ebx),%esi
xorl 48(%ebx),%edx
xorl 56(%ebx),%edi
movl %ebp,16(%eax)
movl %ecx,32(%eax)
movl %esi,36(%eax)
movl %edx,48(%eax)
movl %edi,56(%eax)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
xorl 4(%ebx),%ebp
xorl 8(%ebx),%ecx
xorl 12(%ebx),%esi
xorl 20(%ebx),%edx
xorl 24(%ebx),%edi
movl %ebp,4(%eax)
movl %ecx,8(%eax)
movl %esi,12(%eax)
movl %edx,20(%eax)
movl %edi,24(%eax)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
xorl 28(%ebx),%ebp
xorl 40(%ebx),%ecx
xorl 44(%ebx),%esi
xorl 52(%ebx),%edx
xorl 60(%ebx),%edi
leal 64(%ebx),%ebx
movl %ebp,28(%eax)
movl (%esp),%ebp
movl %ecx,40(%eax)
movl 160(%esp),%ecx
movl %esi,44(%eax)
movl %edx,52(%eax)
movl %edi,60(%eax)
movl %ebp,(%eax)
leal 64(%eax),%eax
subl $64,%ecx
jnz .L003outer_loop
jmp .L006done
.L005tail:
addl 112(%esp),%edx
addl 120(%esp),%edi
movl %eax,(%esp)
movl %ebp,16(%esp)
movl %ecx,32(%esp)
movl %esi,36(%esp)
movl %edx,48(%esp)
movl %edi,56(%esp)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
movl %ebp,4(%esp)
movl %ecx,8(%esp)
movl %esi,12(%esp)
movl %edx,20(%esp)
movl %edi,24(%esp)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
movl %ebp,28(%esp)
movl 156(%esp),%ebp
movl %ecx,40(%esp)
movl 152(%esp),%ecx
movl %esi,44(%esp)
xorl %esi,%esi
movl %edx,52(%esp)
movl %edi,60(%esp)
xorl %eax,%eax
xorl %edx,%edx
.L007tail_loop:
movb (%esi,%ebp,1),%al
movb (%esp,%esi,1),%dl
leal 1(%esi),%esi
xorb %dl,%al
movb %al,-1(%ecx,%esi,1)
decl %ebx
jnz .L007tail_loop
.L006done:
addl $132,%esp
.L000no_data:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ctr32,.-.L_ChaCha20_ctr32_begin
.globl ChaCha20_ssse3
.hidden ChaCha20_ssse3
.type ChaCha20_ssse3,@function
.align 16
ChaCha20_ssse3:
.L_ChaCha20_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
.Lssse3_shortcut:
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%ecx
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $524,%esp
andl $-64,%esp
movl %ebp,512(%esp)
leal .Lssse3_data-.Lpic_point(%eax),%eax
movdqu (%ebx),%xmm3
cmpl $256,%ecx
jb .L0081x
movl %edx,516(%esp)
movl %ebx,520(%esp)
subl $256,%ecx
leal 384(%esp),%ebp
movdqu (%edx),%xmm7
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
paddd 48(%eax),%xmm0
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
psubd 64(%eax),%xmm0
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,64(%ebp)
movdqa %xmm1,80(%ebp)
movdqa %xmm2,96(%ebp)
movdqa %xmm3,112(%ebp)
movdqu 16(%edx),%xmm3
movdqa %xmm4,-64(%ebp)
movdqa %xmm5,-48(%ebp)
movdqa %xmm6,-32(%ebp)
movdqa %xmm7,-16(%ebp)
movdqa 32(%eax),%xmm7
leal 128(%esp),%ebx
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,(%ebp)
movdqa %xmm1,16(%ebp)
movdqa %xmm2,32(%ebp)
movdqa %xmm3,48(%ebp)
movdqa %xmm4,-128(%ebp)
movdqa %xmm5,-112(%ebp)
movdqa %xmm6,-96(%ebp)
movdqa %xmm7,-80(%ebp)
leal 128(%esi),%esi
leal 128(%edi),%edi
jmp .L009outer_loop
.align 16
.L009outer_loop:
movdqa -112(%ebp),%xmm1
movdqa -96(%ebp),%xmm2
movdqa -80(%ebp),%xmm3
movdqa -48(%ebp),%xmm5
movdqa -32(%ebp),%xmm6
movdqa -16(%ebp),%xmm7
movdqa %xmm1,-112(%ebx)
movdqa %xmm2,-96(%ebx)
movdqa %xmm3,-80(%ebx)
movdqa %xmm5,-48(%ebx)
movdqa %xmm6,-32(%ebx)
movdqa %xmm7,-16(%ebx)
movdqa 32(%ebp),%xmm2
movdqa 48(%ebp),%xmm3
movdqa 64(%ebp),%xmm4
movdqa 80(%ebp),%xmm5
movdqa 96(%ebp),%xmm6
movdqa 112(%ebp),%xmm7
paddd 64(%eax),%xmm4
movdqa %xmm2,32(%ebx)
movdqa %xmm3,48(%ebx)
movdqa %xmm4,64(%ebx)
movdqa %xmm5,80(%ebx)
movdqa %xmm6,96(%ebx)
movdqa %xmm7,112(%ebx)
movdqa %xmm4,64(%ebp)
movdqa -128(%ebp),%xmm0
movdqa %xmm4,%xmm6
movdqa -64(%ebp),%xmm3
movdqa (%ebp),%xmm4
movdqa 16(%ebp),%xmm5
movl $10,%edx
nop
.align 16
.L010loop:
paddd %xmm3,%xmm0
movdqa %xmm3,%xmm2
pxor %xmm0,%xmm6
pshufb (%eax),%xmm6
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -48(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 80(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,64(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-64(%ebx)
paddd %xmm7,%xmm5
movdqa 32(%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -32(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 96(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,80(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,16(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-48(%ebx)
paddd %xmm6,%xmm4
movdqa 48(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -16(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 112(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,96(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-32(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa -48(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,%xmm6
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
pshufb (%eax),%xmm6
movdqa %xmm3,-16(%ebx)
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -32(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 64(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,112(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,32(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-48(%ebx)
paddd %xmm7,%xmm5
movdqa (%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -16(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 80(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,64(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,48(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-32(%ebx)
paddd %xmm6,%xmm4
movdqa 16(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -64(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 96(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,80(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-16(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 64(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,96(%ebx)
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
por %xmm1,%xmm3
decl %edx
jnz .L010loop
movdqa %xmm3,-64(%ebx)
movdqa %xmm4,(%ebx)
movdqa %xmm5,16(%ebx)
movdqa %xmm6,64(%ebx)
movdqa %xmm7,96(%ebx)
movdqa -112(%ebx),%xmm1
movdqa -96(%ebx),%xmm2
movdqa -80(%ebx),%xmm3
paddd -128(%ebp),%xmm0
paddd -112(%ebp),%xmm1
paddd -96(%ebp),%xmm2
paddd -80(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa -64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa -48(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa -32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa -16(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd -64(%ebp),%xmm0
paddd -48(%ebp),%xmm1
paddd -32(%ebp),%xmm2
paddd -16(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa (%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 16(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 48(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd (%ebp),%xmm0
paddd 16(%ebp),%xmm1
paddd 32(%ebp),%xmm2
paddd 48(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa 64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 80(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 96(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 112(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd 64(%ebp),%xmm0
paddd 80(%ebp),%xmm1
paddd 96(%ebp),%xmm2
paddd 112(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 208(%esi),%esi
pxor %xmm0,%xmm4
pxor %xmm1,%xmm5
pxor %xmm2,%xmm6
pxor %xmm3,%xmm7
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 208(%edi),%edi
subl $256,%ecx
jnc .L009outer_loop
addl $256,%ecx
jz .L011done
movl 520(%esp),%ebx
leal -128(%esi),%esi
movl 516(%esp),%edx
leal -128(%edi),%edi
movd 64(%ebp),%xmm2
movdqu (%ebx),%xmm3
paddd 96(%eax),%xmm2
pand 112(%eax),%xmm3
por %xmm2,%xmm3
.L0081x:
movdqa 32(%eax),%xmm0
movdqu (%edx),%xmm1
movdqu 16(%edx),%xmm2
movdqa (%eax),%xmm6
movdqa 16(%eax),%xmm7
movl %ebp,48(%esp)
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
movl $10,%edx
jmp .L012loop1x
.align 16
.L013outer1x:
movdqa 80(%eax),%xmm3
movdqa (%esp),%xmm0
movdqa 16(%esp),%xmm1
movdqa 32(%esp),%xmm2
paddd 48(%esp),%xmm3
movl $10,%edx
movdqa %xmm3,48(%esp)
jmp .L012loop1x
.align 16
.L012loop1x:
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $57,%xmm1,%xmm1
pshufd $147,%xmm3,%xmm3
nop
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $147,%xmm1,%xmm1
pshufd $57,%xmm3,%xmm3
decl %edx
jnz .L012loop1x
paddd (%esp),%xmm0
paddd 16(%esp),%xmm1
paddd 32(%esp),%xmm2
paddd 48(%esp),%xmm3
cmpl $64,%ecx
jb .L014tail
movdqu (%esi),%xmm4
movdqu 16(%esi),%xmm5
pxor %xmm4,%xmm0
movdqu 32(%esi),%xmm4
pxor %xmm5,%xmm1
movdqu 48(%esi),%xmm5
pxor %xmm4,%xmm2
pxor %xmm5,%xmm3
leal 64(%esi),%esi
movdqu %xmm0,(%edi)
movdqu %xmm1,16(%edi)
movdqu %xmm2,32(%edi)
movdqu %xmm3,48(%edi)
leal 64(%edi),%edi
subl $64,%ecx
jnz .L013outer1x
jmp .L011done
.L014tail:
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
xorl %eax,%eax
xorl %edx,%edx
xorl %ebp,%ebp
.L015tail_loop:
movb (%esp,%ebp,1),%al
movb (%esi,%ebp,1),%dl
leal 1(%ebp),%ebp
xorb %dl,%al
movb %al,-1(%edi,%ebp,1)
decl %ecx
jnz .L015tail_loop
.L011done:
movl 512(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ssse3,.-.L_ChaCha20_ssse3_begin
.align 64
.Lssse3_data:
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
.long 1634760805,857760878,2036477234,1797285236
.long 0,1,2,3
.long 4,4,4,4
.long 1,0,0,0
.long 4,0,0,0
.long 0,-1,-1,-1
.align 64
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
.byte 114,103,62,0
#endif


@ -347,39 +347,4 @@ asm_RC4_set_key:
popl %ebp popl %ebp
ret ret
.size asm_RC4_set_key,.-.L_asm_RC4_set_key_begin .size asm_RC4_set_key,.-.L_asm_RC4_set_key_begin
.globl RC4_options
.hidden RC4_options
.type RC4_options,@function
.align 16
RC4_options:
.L_RC4_options_begin:
call .L018pic_point
.L018pic_point:
popl %eax
leal .L019opts-.L018pic_point(%eax),%eax
call .L020PIC_me_up
.L020PIC_me_up:
popl %edx
leal OPENSSL_ia32cap_P-.L020PIC_me_up(%edx),%edx
movl (%edx),%edx
btl $20,%edx
jc .L0211xchar
btl $26,%edx
jnc .L022ret
addl $25,%eax
ret
.L0211xchar:
addl $12,%eax
.L022ret:
ret
.align 64
.L019opts:
.byte 114,99,52,40,52,120,44,105,110,116,41,0
.byte 114,99,52,40,49,120,44,99,104,97,114,41,0
.byte 114,99,52,40,56,120,44,109,109,120,41,0
.byte 82,67,52,32,102,111,114,32,120,56,54,44,32,67,82,89
.byte 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114
.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.size RC4_options,.-.L_RC4_options_begin
#endif #endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -82,8 +82,8 @@ _x86_64_AES_encrypt:
movl 0(%r14,%rdi,8),%edi movl 0(%r14,%rdi,8),%edi
movl 0(%r14,%rbp,8),%ebp movl 0(%r14,%rbp,8),%ebp
andl $65280,%edi andl $0x0000ff00,%edi
andl $65280,%ebp andl $0x0000ff00,%ebp
xorl %edi,%r10d xorl %edi,%r10d
xorl %ebp,%r11d xorl %ebp,%r11d
@ -95,8 +95,8 @@ _x86_64_AES_encrypt:
movl 0(%r14,%rsi,8),%esi movl 0(%r14,%rsi,8),%esi
movl 0(%r14,%rdi,8),%edi movl 0(%r14,%rdi,8),%edi
andl $65280,%esi andl $0x0000ff00,%esi
andl $65280,%edi andl $0x0000ff00,%edi
shrl $16,%ebx shrl $16,%ebx
xorl %esi,%r12d xorl %esi,%r12d
xorl %edi,%r8d xorl %edi,%r8d
@ -109,9 +109,9 @@ _x86_64_AES_encrypt:
movl 0(%r14,%rdi,8),%edi movl 0(%r14,%rdi,8),%edi
movl 0(%r14,%rbp,8),%ebp movl 0(%r14,%rbp,8),%ebp
andl $16711680,%esi andl $0x00ff0000,%esi
andl $16711680,%edi andl $0x00ff0000,%edi
andl $16711680,%ebp andl $0x00ff0000,%ebp
xorl %esi,%r10d xorl %esi,%r10d
xorl %edi,%r11d xorl %edi,%r11d
@ -124,9 +124,9 @@ _x86_64_AES_encrypt:
movl 2(%r14,%rdi,8),%edi movl 2(%r14,%rdi,8),%edi
movl 2(%r14,%rbp,8),%ebp movl 2(%r14,%rbp,8),%ebp
andl $16711680,%esi andl $0x00ff0000,%esi
andl $4278190080,%edi andl $0xff000000,%edi
andl $4278190080,%ebp andl $0xff000000,%ebp
xorl %esi,%r8d xorl %esi,%r8d
xorl %edi,%r10d xorl %edi,%r10d
@ -139,8 +139,8 @@ _x86_64_AES_encrypt:
movl 2(%r14,%rdi,8),%edi movl 2(%r14,%rdi,8),%edi
movl 16+0(%r15),%eax movl 16+0(%r15),%eax
andl $4278190080,%esi andl $0xff000000,%esi
andl $4278190080,%edi andl $0xff000000,%edi
xorl %esi,%r12d xorl %esi,%r12d
xorl %edi,%r8d xorl %edi,%r8d
@ -242,8 +242,8 @@ _x86_64_AES_encrypt_compact:
xorl %r8d,%edx xorl %r8d,%edx
cmpq 16(%rsp),%r15 cmpq 16(%rsp),%r15
je .Lenc_compact_done je .Lenc_compact_done
movl $2155905152,%r10d movl $0x80808080,%r10d
movl $2155905152,%r11d movl $0x80808080,%r11d
andl %eax,%r10d andl %eax,%r10d
andl %ebx,%r11d andl %ebx,%r11d
movl %r10d,%esi movl %r10d,%esi
@ -254,10 +254,10 @@ _x86_64_AES_encrypt_compact:
leal (%rbx,%rbx,1),%r9d leal (%rbx,%rbx,1),%r9d
subl %r10d,%esi subl %r10d,%esi
subl %r11d,%edi subl %r11d,%edi
andl $4278124286,%r8d andl $0xfefefefe,%r8d
andl $4278124286,%r9d andl $0xfefefefe,%r9d
andl $454761243,%esi andl $0x1b1b1b1b,%esi
andl $454761243,%edi andl $0x1b1b1b1b,%edi
movl %eax,%r10d movl %eax,%r10d
movl %ebx,%r11d movl %ebx,%r11d
xorl %esi,%r8d xorl %esi,%r8d
@ -265,9 +265,9 @@ _x86_64_AES_encrypt_compact:
xorl %r8d,%eax xorl %r8d,%eax
xorl %r9d,%ebx xorl %r9d,%ebx
movl $2155905152,%r12d movl $0x80808080,%r12d
roll $24,%eax roll $24,%eax
movl $2155905152,%ebp movl $0x80808080,%ebp
roll $24,%ebx roll $24,%ebx
andl %ecx,%r12d andl %ecx,%r12d
andl %edx,%ebp andl %edx,%ebp
@ -290,10 +290,10 @@ _x86_64_AES_encrypt_compact:
xorl %r10d,%eax xorl %r10d,%eax
xorl %r11d,%ebx xorl %r11d,%ebx
andl $4278124286,%r8d andl $0xfefefefe,%r8d
andl $4278124286,%r9d andl $0xfefefefe,%r9d
andl $454761243,%esi andl $0x1b1b1b1b,%esi
andl $454761243,%edi andl $0x1b1b1b1b,%edi
movl %ecx,%r12d movl %ecx,%r12d
movl %edx,%ebp movl %edx,%ebp
xorl %esi,%r8d xorl %esi,%r8d
@ -345,7 +345,7 @@ asm_AES_encrypt:
andq $-64,%rsp andq $-64,%rsp
subq %rsp,%rcx subq %rsp,%rcx
negq %rcx negq %rcx
andq $960,%rcx andq $0x3c0,%rcx
subq %rcx,%rsp subq %rcx,%rsp
subq $32,%rsp subq $32,%rsp
@ -370,7 +370,7 @@ asm_AES_encrypt:
leaq .LAES_Te+2048(%rip),%r14 leaq .LAES_Te+2048(%rip),%r14
leaq 768(%rsp),%rbp leaq 768(%rsp),%rbp
subq %r14,%rbp subq %r14,%rbp
andq $768,%rbp andq $0x300,%rbp
leaq (%r14,%rbp,1),%r14 leaq (%r14,%rbp,1),%r14
call _x86_64_AES_encrypt_compact call _x86_64_AES_encrypt_compact
@ -791,7 +791,7 @@ asm_AES_decrypt:
andq $-64,%rsp andq $-64,%rsp
subq %rsp,%rcx subq %rsp,%rcx
negq %rcx negq %rcx
andq $960,%rcx andq $0x3c0,%rcx
subq %rcx,%rsp subq %rcx,%rsp
subq $32,%rsp subq $32,%rsp
@ -816,7 +816,7 @@ asm_AES_decrypt:
leaq .LAES_Td+2048(%rip),%r14 leaq .LAES_Td+2048(%rip),%r14
leaq 768(%rsp),%rbp leaq 768(%rsp),%rbp
subq %r14,%rbp subq %r14,%rbp
andq $768,%rbp andq $0x300,%rbp
leaq (%r14,%rbp,1),%r14 leaq (%r14,%rbp,1),%r14
shrq $3,%rbp shrq $3,%rbp
addq %rbp,%r14 addq %rbp,%r14
@ -1334,9 +1334,9 @@ asm_AES_cbc_encrypt:
movq %r14,%r10 movq %r14,%r10
leaq 2304(%r14),%r11 leaq 2304(%r14),%r11
movq %r15,%r12 movq %r15,%r12
andq $4095,%r10 andq $0xFFF,%r10
andq $4095,%r11 andq $0xFFF,%r11
andq $4095,%r12 andq $0xFFF,%r12
cmpq %r11,%r12 cmpq %r11,%r12
jb .Lcbc_te_break_out jb .Lcbc_te_break_out
@ -1345,7 +1345,7 @@ asm_AES_cbc_encrypt:
jmp .Lcbc_te_ok jmp .Lcbc_te_ok
.Lcbc_te_break_out: .Lcbc_te_break_out:
subq %r10,%r12 subq %r10,%r12
andq $4095,%r12 andq $0xFFF,%r12
addq $320,%r12 addq $320,%r12
subq %r12,%r15 subq %r12,%r15
.align 4 .align 4
@ -1371,7 +1371,7 @@ asm_AES_cbc_encrypt:
movq %r15,%r10 movq %r15,%r10
subq %r14,%r10 subq %r14,%r10
andq $4095,%r10 andq $0xfff,%r10
cmpq $2304,%r10 cmpq $2304,%r10
jb .Lcbc_do_ecopy jb .Lcbc_do_ecopy
cmpq $4096-248,%r10 cmpq $4096-248,%r10
@ -1558,7 +1558,7 @@ asm_AES_cbc_encrypt:
leaq -88-63(%rcx),%r10 leaq -88-63(%rcx),%r10
subq %rbp,%r10 subq %rbp,%r10
negq %r10 negq %r10
andq $960,%r10 andq $0x3c0,%r10
subq %r10,%rbp subq %r10,%rbp
xchgq %rsp,%rbp xchgq %rsp,%rbp
@ -1587,7 +1587,7 @@ asm_AES_cbc_encrypt:
leaq 2048(%r14),%r14 leaq 2048(%r14),%r14
leaq 768-8(%rsp),%rax leaq 768-8(%rsp),%rax
subq %r14,%rax subq %r14,%rax
andq $768,%rax andq $0x300,%rax
leaq (%r14,%rax,1),%r14 leaq (%r14,%rax,1),%r14
cmpq $0,%rbx cmpq $0,%rbx


@ -508,7 +508,7 @@ aesni_ecb_encrypt:
testl %r8d,%r8d testl %r8d,%r8d
jz .Lecb_decrypt jz .Lecb_decrypt
cmpq $128,%rdx cmpq $0x80,%rdx
jb .Lecb_enc_tail jb .Lecb_enc_tail
movdqu (%rdi),%xmm2 movdqu (%rdi),%xmm2
@ -520,7 +520,7 @@ aesni_ecb_encrypt:
movdqu 96(%rdi),%xmm8 movdqu 96(%rdi),%xmm8
movdqu 112(%rdi),%xmm9 movdqu 112(%rdi),%xmm9
leaq 128(%rdi),%rdi leaq 128(%rdi),%rdi
subq $128,%rdx subq $0x80,%rdx
jmp .Lecb_enc_loop8_enter jmp .Lecb_enc_loop8_enter
.align 16 .align 16
.Lecb_enc_loop8: .Lecb_enc_loop8:
@ -548,7 +548,7 @@ aesni_ecb_encrypt:
call _aesni_encrypt8 call _aesni_encrypt8
subq $128,%rdx subq $0x80,%rdx
jnc .Lecb_enc_loop8 jnc .Lecb_enc_loop8
movups %xmm2,(%rsi) movups %xmm2,(%rsi)
@ -562,22 +562,22 @@ aesni_ecb_encrypt:
movups %xmm8,96(%rsi) movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi) movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi leaq 128(%rsi),%rsi
addq $128,%rdx addq $0x80,%rdx
jz .Lecb_ret jz .Lecb_ret
.Lecb_enc_tail: .Lecb_enc_tail:
movups (%rdi),%xmm2 movups (%rdi),%xmm2
cmpq $32,%rdx cmpq $0x20,%rdx
jb .Lecb_enc_one jb .Lecb_enc_one
movups 16(%rdi),%xmm3 movups 16(%rdi),%xmm3
je .Lecb_enc_two je .Lecb_enc_two
movups 32(%rdi),%xmm4 movups 32(%rdi),%xmm4
cmpq $64,%rdx cmpq $0x40,%rdx
jb .Lecb_enc_three jb .Lecb_enc_three
movups 48(%rdi),%xmm5 movups 48(%rdi),%xmm5
je .Lecb_enc_four je .Lecb_enc_four
movups 64(%rdi),%xmm6 movups 64(%rdi),%xmm6
cmpq $96,%rdx cmpq $0x60,%rdx
jb .Lecb_enc_five jb .Lecb_enc_five
movups 80(%rdi),%xmm7 movups 80(%rdi),%xmm7
je .Lecb_enc_six je .Lecb_enc_six
@ -651,7 +651,7 @@ aesni_ecb_encrypt:
.align 16 .align 16
.Lecb_decrypt: .Lecb_decrypt:
cmpq $128,%rdx cmpq $0x80,%rdx
jb .Lecb_dec_tail jb .Lecb_dec_tail
movdqu (%rdi),%xmm2 movdqu (%rdi),%xmm2
@ -663,7 +663,7 @@ aesni_ecb_encrypt:
movdqu 96(%rdi),%xmm8 movdqu 96(%rdi),%xmm8
movdqu 112(%rdi),%xmm9 movdqu 112(%rdi),%xmm9
leaq 128(%rdi),%rdi leaq 128(%rdi),%rdi
subq $128,%rdx subq $0x80,%rdx
jmp .Lecb_dec_loop8_enter jmp .Lecb_dec_loop8_enter
.align 16 .align 16
.Lecb_dec_loop8: .Lecb_dec_loop8:
@ -692,7 +692,7 @@ aesni_ecb_encrypt:
call _aesni_decrypt8 call _aesni_decrypt8
movups (%r11),%xmm0 movups (%r11),%xmm0
subq $128,%rdx subq $0x80,%rdx
jnc .Lecb_dec_loop8 jnc .Lecb_dec_loop8
movups %xmm2,(%rsi) movups %xmm2,(%rsi)
@ -714,22 +714,22 @@ aesni_ecb_encrypt:
movups %xmm9,112(%rsi) movups %xmm9,112(%rsi)
pxor %xmm9,%xmm9 pxor %xmm9,%xmm9
leaq 128(%rsi),%rsi leaq 128(%rsi),%rsi
addq $128,%rdx addq $0x80,%rdx
jz .Lecb_ret jz .Lecb_ret
.Lecb_dec_tail: .Lecb_dec_tail:
movups (%rdi),%xmm2 movups (%rdi),%xmm2
cmpq $32,%rdx cmpq $0x20,%rdx
jb .Lecb_dec_one jb .Lecb_dec_one
movups 16(%rdi),%xmm3 movups 16(%rdi),%xmm3
je .Lecb_dec_two je .Lecb_dec_two
movups 32(%rdi),%xmm4 movups 32(%rdi),%xmm4
cmpq $64,%rdx cmpq $0x40,%rdx
jb .Lecb_dec_three jb .Lecb_dec_three
movups 48(%rdi),%xmm5 movups 48(%rdi),%xmm5
je .Lecb_dec_four je .Lecb_dec_four
movups 64(%rdi),%xmm6 movups 64(%rdi),%xmm6
cmpq $96,%rdx cmpq $0x60,%rdx
jb .Lecb_dec_five jb .Lecb_dec_five
movups 80(%rdi),%xmm7 movups 80(%rdi),%xmm7
je .Lecb_dec_six je .Lecb_dec_six
@ -1607,7 +1607,7 @@ aesni_xts_encrypt:
movdqa .Lxts_magic(%rip),%xmm8 movdqa .Lxts_magic(%rip),%xmm8
movdqa %xmm2,%xmm15 movdqa %xmm2,%xmm15
pshufd $95,%xmm2,%xmm9 pshufd $0x5f,%xmm2,%xmm9
pxor %xmm0,%xmm1 pxor %xmm0,%xmm1
movdqa %xmm9,%xmm14 movdqa %xmm9,%xmm14
paddd %xmm9,%xmm9 paddd %xmm9,%xmm9
@ -1706,7 +1706,7 @@ aesni_xts_encrypt:
.byte 102,15,56,220,248 .byte 102,15,56,220,248
movups 64(%r11),%xmm0 movups 64(%r11),%xmm0
movdqa %xmm8,80(%rsp) movdqa %xmm8,80(%rsp)
pshufd $95,%xmm15,%xmm9 pshufd $0x5f,%xmm15,%xmm9
jmp .Lxts_enc_loop6 jmp .Lxts_enc_loop6
.align 32 .align 32
.Lxts_enc_loop6: .Lxts_enc_loop6:
@ -1845,13 +1845,13 @@ aesni_xts_encrypt:
jz .Lxts_enc_done jz .Lxts_enc_done
pxor %xmm0,%xmm11 pxor %xmm0,%xmm11
cmpq $32,%rdx cmpq $0x20,%rdx
jb .Lxts_enc_one jb .Lxts_enc_one
pxor %xmm0,%xmm12 pxor %xmm0,%xmm12
je .Lxts_enc_two je .Lxts_enc_two
pxor %xmm0,%xmm13 pxor %xmm0,%xmm13
cmpq $64,%rdx cmpq $0x40,%rdx
jb .Lxts_enc_three jb .Lxts_enc_three
pxor %xmm0,%xmm14 pxor %xmm0,%xmm14
je .Lxts_enc_four je .Lxts_enc_four
@ -2079,7 +2079,7 @@ aesni_xts_decrypt:
movdqa .Lxts_magic(%rip),%xmm8 movdqa .Lxts_magic(%rip),%xmm8
movdqa %xmm2,%xmm15 movdqa %xmm2,%xmm15
pshufd $95,%xmm2,%xmm9 pshufd $0x5f,%xmm2,%xmm9
pxor %xmm0,%xmm1 pxor %xmm0,%xmm1
movdqa %xmm9,%xmm14 movdqa %xmm9,%xmm14
paddd %xmm9,%xmm9 paddd %xmm9,%xmm9
@ -2178,7 +2178,7 @@ aesni_xts_decrypt:
.byte 102,15,56,222,248 .byte 102,15,56,222,248
movups 64(%r11),%xmm0 movups 64(%r11),%xmm0
movdqa %xmm8,80(%rsp) movdqa %xmm8,80(%rsp)
pshufd $95,%xmm15,%xmm9 pshufd $0x5f,%xmm15,%xmm9
jmp .Lxts_dec_loop6 jmp .Lxts_dec_loop6
.align 32 .align 32
.Lxts_dec_loop6: .Lxts_dec_loop6:
@ -2318,13 +2318,13 @@ aesni_xts_decrypt:
jz .Lxts_dec_done jz .Lxts_dec_done
pxor %xmm0,%xmm12 pxor %xmm0,%xmm12
cmpq $32,%rdx cmpq $0x20,%rdx
jb .Lxts_dec_one jb .Lxts_dec_one
pxor %xmm0,%xmm13 pxor %xmm0,%xmm13
je .Lxts_dec_two je .Lxts_dec_two
pxor %xmm0,%xmm14 pxor %xmm0,%xmm14
cmpq $64,%rdx cmpq $0x40,%rdx
jb .Lxts_dec_three jb .Lxts_dec_three
je .Lxts_dec_four je .Lxts_dec_four
@ -2355,7 +2355,7 @@ aesni_xts_decrypt:
pcmpgtd %xmm15,%xmm14 pcmpgtd %xmm15,%xmm14
movdqu %xmm6,64(%rsi) movdqu %xmm6,64(%rsi)
leaq 80(%rsi),%rsi leaq 80(%rsi),%rsi
pshufd $19,%xmm14,%xmm11 pshufd $0x13,%xmm14,%xmm11
andq $15,%r9 andq $15,%r9
jz .Lxts_dec_ret jz .Lxts_dec_ret
@ -2645,7 +2645,7 @@ aesni_cbc_encrypt:
leaq -8(%rax),%rbp leaq -8(%rax),%rbp
movups (%r8),%xmm10 movups (%r8),%xmm10
movl %r10d,%eax movl %r10d,%eax
cmpq $80,%rdx cmpq $0x50,%rdx
jbe .Lcbc_dec_tail jbe .Lcbc_dec_tail
movups (%rcx),%xmm0 movups (%rcx),%xmm0
@ -2661,14 +2661,14 @@ aesni_cbc_encrypt:
movdqu 80(%rdi),%xmm7 movdqu 80(%rdi),%xmm7
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movl OPENSSL_ia32cap_P+4(%rip),%r9d movl OPENSSL_ia32cap_P+4(%rip),%r9d
cmpq $112,%rdx cmpq $0x70,%rdx
jbe .Lcbc_dec_six_or_seven jbe .Lcbc_dec_six_or_seven
andl $71303168,%r9d andl $71303168,%r9d
subq $80,%rdx subq $0x50,%rdx
cmpl $4194304,%r9d cmpl $4194304,%r9d
je .Lcbc_dec_loop6_enter je .Lcbc_dec_loop6_enter
subq $32,%rdx subq $0x20,%rdx
leaq 112(%rcx),%rcx leaq 112(%rcx),%rcx
jmp .Lcbc_dec_loop8_enter jmp .Lcbc_dec_loop8_enter
.align 16 .align 16
@ -2683,7 +2683,7 @@ aesni_cbc_encrypt:
movups 16-112(%rcx),%xmm1 movups 16-112(%rcx),%xmm1
pxor %xmm0,%xmm4 pxor %xmm0,%xmm4
xorq %r11,%r11 xorq %r11,%r11
cmpq $112,%rdx cmpq $0x70,%rdx
pxor %xmm0,%xmm5 pxor %xmm0,%xmm5
pxor %xmm0,%xmm6 pxor %xmm0,%xmm6
pxor %xmm0,%xmm7 pxor %xmm0,%xmm7
@ -2868,21 +2868,21 @@ aesni_cbc_encrypt:
movups %xmm8,96(%rsi) movups %xmm8,96(%rsi)
leaq 112(%rsi),%rsi leaq 112(%rsi),%rsi
subq $128,%rdx subq $0x80,%rdx
ja .Lcbc_dec_loop8 ja .Lcbc_dec_loop8
movaps %xmm9,%xmm2 movaps %xmm9,%xmm2
leaq -112(%rcx),%rcx leaq -112(%rcx),%rcx
addq $112,%rdx addq $0x70,%rdx
jle .Lcbc_dec_clear_tail_collected jle .Lcbc_dec_clear_tail_collected
movups %xmm9,(%rsi) movups %xmm9,(%rsi)
leaq 16(%rsi),%rsi leaq 16(%rsi),%rsi
cmpq $80,%rdx cmpq $0x50,%rdx
jbe .Lcbc_dec_tail jbe .Lcbc_dec_tail
movaps %xmm11,%xmm2 movaps %xmm11,%xmm2
.Lcbc_dec_six_or_seven: .Lcbc_dec_six_or_seven:
cmpq $96,%rdx cmpq $0x60,%rdx
ja .Lcbc_dec_seven ja .Lcbc_dec_seven
movaps %xmm7,%xmm8 movaps %xmm7,%xmm8
@ -2975,33 +2975,33 @@ aesni_cbc_encrypt:
movl %r10d,%eax movl %r10d,%eax
movdqu %xmm6,64(%rsi) movdqu %xmm6,64(%rsi)
leaq 80(%rsi),%rsi leaq 80(%rsi),%rsi
subq $96,%rdx subq $0x60,%rdx
ja .Lcbc_dec_loop6 ja .Lcbc_dec_loop6
movdqa %xmm7,%xmm2 movdqa %xmm7,%xmm2
addq $80,%rdx addq $0x50,%rdx
jle .Lcbc_dec_clear_tail_collected jle .Lcbc_dec_clear_tail_collected
movups %xmm7,(%rsi) movups %xmm7,(%rsi)
leaq 16(%rsi),%rsi leaq 16(%rsi),%rsi
.Lcbc_dec_tail: .Lcbc_dec_tail:
movups (%rdi),%xmm2 movups (%rdi),%xmm2
subq $16,%rdx subq $0x10,%rdx
jbe .Lcbc_dec_one jbe .Lcbc_dec_one
movups 16(%rdi),%xmm3 movups 16(%rdi),%xmm3
movaps %xmm2,%xmm11 movaps %xmm2,%xmm11
subq $16,%rdx subq $0x10,%rdx
jbe .Lcbc_dec_two jbe .Lcbc_dec_two
movups 32(%rdi),%xmm4 movups 32(%rdi),%xmm4
movaps %xmm3,%xmm12 movaps %xmm3,%xmm12
subq $16,%rdx subq $0x10,%rdx
jbe .Lcbc_dec_three jbe .Lcbc_dec_three
movups 48(%rdi),%xmm5 movups 48(%rdi),%xmm5
movaps %xmm4,%xmm13 movaps %xmm4,%xmm13
subq $16,%rdx subq $0x10,%rdx
jbe .Lcbc_dec_four jbe .Lcbc_dec_four
movups 64(%rdi),%xmm6 movups 64(%rdi),%xmm6
@ -3026,7 +3026,7 @@ aesni_cbc_encrypt:
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
pxor %xmm6,%xmm6 pxor %xmm6,%xmm6
pxor %xmm7,%xmm7 pxor %xmm7,%xmm7
subq $16,%rdx subq $0x10,%rdx
jmp .Lcbc_dec_tail_collected jmp .Lcbc_dec_tail_collected
.align 16 .align 16
@ -3345,7 +3345,7 @@ __aesni_set_encrypt_key:
pslldq $4,%xmm0 pslldq $4,%xmm0
pxor %xmm3,%xmm0 pxor %xmm3,%xmm0
pshufd $255,%xmm0,%xmm3 pshufd $0xff,%xmm0,%xmm3
pxor %xmm1,%xmm3 pxor %xmm1,%xmm3
pslldq $4,%xmm1 pslldq $4,%xmm1
pxor %xmm1,%xmm3 pxor %xmm1,%xmm3
@ -3432,7 +3432,7 @@ __aesni_set_encrypt_key:
decl %r10d decl %r10d
jz .Ldone_key256 jz .Ldone_key256
pshufd $255,%xmm0,%xmm2 pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3 pxor %xmm3,%xmm3
.byte 102,15,56,221,211 .byte 102,15,56,221,211


@ -327,45 +327,45 @@ _bsaes_encrypt8_bitslice:
pxor %xmm2,%xmm5 pxor %xmm2,%xmm5
decl %r10d decl %r10d
jl .Lenc_done jl .Lenc_done
pshufd $147,%xmm15,%xmm7 pshufd $0x93,%xmm15,%xmm7
pshufd $147,%xmm0,%xmm8 pshufd $0x93,%xmm0,%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $147,%xmm3,%xmm9 pshufd $0x93,%xmm3,%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $147,%xmm5,%xmm10 pshufd $0x93,%xmm5,%xmm10
pxor %xmm9,%xmm3 pxor %xmm9,%xmm3
pshufd $147,%xmm2,%xmm11 pshufd $0x93,%xmm2,%xmm11
pxor %xmm10,%xmm5 pxor %xmm10,%xmm5
pshufd $147,%xmm6,%xmm12 pshufd $0x93,%xmm6,%xmm12
pxor %xmm11,%xmm2 pxor %xmm11,%xmm2
pshufd $147,%xmm1,%xmm13 pshufd $0x93,%xmm1,%xmm13
pxor %xmm12,%xmm6 pxor %xmm12,%xmm6
pshufd $147,%xmm4,%xmm14 pshufd $0x93,%xmm4,%xmm14
pxor %xmm13,%xmm1 pxor %xmm13,%xmm1
pxor %xmm14,%xmm4 pxor %xmm14,%xmm4
pxor %xmm15,%xmm8 pxor %xmm15,%xmm8
pxor %xmm4,%xmm7 pxor %xmm4,%xmm7
pxor %xmm4,%xmm8 pxor %xmm4,%xmm8
pshufd $78,%xmm15,%xmm15 pshufd $0x4E,%xmm15,%xmm15
pxor %xmm0,%xmm9 pxor %xmm0,%xmm9
pshufd $78,%xmm0,%xmm0 pshufd $0x4E,%xmm0,%xmm0
pxor %xmm2,%xmm12 pxor %xmm2,%xmm12
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pxor %xmm6,%xmm13 pxor %xmm6,%xmm13
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pxor %xmm5,%xmm11 pxor %xmm5,%xmm11
pshufd $78,%xmm2,%xmm7 pshufd $0x4E,%xmm2,%xmm7
pxor %xmm1,%xmm14 pxor %xmm1,%xmm14
pshufd $78,%xmm6,%xmm8 pshufd $0x4E,%xmm6,%xmm8
pxor %xmm3,%xmm10 pxor %xmm3,%xmm10
pshufd $78,%xmm5,%xmm2 pshufd $0x4E,%xmm5,%xmm2
pxor %xmm4,%xmm10 pxor %xmm4,%xmm10
pshufd $78,%xmm4,%xmm6 pshufd $0x4E,%xmm4,%xmm6
pxor %xmm4,%xmm11 pxor %xmm4,%xmm11
pshufd $78,%xmm1,%xmm5 pshufd $0x4E,%xmm1,%xmm5
pxor %xmm11,%xmm7 pxor %xmm11,%xmm7
pshufd $78,%xmm3,%xmm1 pshufd $0x4E,%xmm3,%xmm1
pxor %xmm12,%xmm8 pxor %xmm12,%xmm8
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pxor %xmm14,%xmm6 pxor %xmm14,%xmm6
@ -799,24 +799,24 @@ _bsaes_decrypt8:
decl %r10d decl %r10d
jl .Ldec_done jl .Ldec_done
pshufd $78,%xmm15,%xmm7 pshufd $0x4E,%xmm15,%xmm7
pshufd $78,%xmm2,%xmm13 pshufd $0x4E,%xmm2,%xmm13
pxor %xmm15,%xmm7 pxor %xmm15,%xmm7
pshufd $78,%xmm4,%xmm14 pshufd $0x4E,%xmm4,%xmm14
pxor %xmm2,%xmm13 pxor %xmm2,%xmm13
pshufd $78,%xmm0,%xmm8 pshufd $0x4E,%xmm0,%xmm8
pxor %xmm4,%xmm14 pxor %xmm4,%xmm14
pshufd $78,%xmm5,%xmm9 pshufd $0x4E,%xmm5,%xmm9
pxor %xmm0,%xmm8 pxor %xmm0,%xmm8
pshufd $78,%xmm3,%xmm10 pshufd $0x4E,%xmm3,%xmm10
pxor %xmm5,%xmm9 pxor %xmm5,%xmm9
pxor %xmm13,%xmm15 pxor %xmm13,%xmm15
pxor %xmm13,%xmm0 pxor %xmm13,%xmm0
pshufd $78,%xmm1,%xmm11 pshufd $0x4E,%xmm1,%xmm11
pxor %xmm3,%xmm10 pxor %xmm3,%xmm10
pxor %xmm7,%xmm5 pxor %xmm7,%xmm5
pxor %xmm8,%xmm3 pxor %xmm8,%xmm3
pshufd $78,%xmm6,%xmm12 pshufd $0x4E,%xmm6,%xmm12
pxor %xmm1,%xmm11 pxor %xmm1,%xmm11
pxor %xmm14,%xmm0 pxor %xmm14,%xmm0
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
@ -830,45 +830,45 @@ _bsaes_decrypt8:
pxor %xmm14,%xmm1 pxor %xmm14,%xmm1
pxor %xmm14,%xmm6 pxor %xmm14,%xmm6
pxor %xmm12,%xmm4 pxor %xmm12,%xmm4
pshufd $147,%xmm15,%xmm7 pshufd $0x93,%xmm15,%xmm7
pshufd $147,%xmm0,%xmm8 pshufd $0x93,%xmm0,%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $147,%xmm5,%xmm9 pshufd $0x93,%xmm5,%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $147,%xmm3,%xmm10 pshufd $0x93,%xmm3,%xmm10
pxor %xmm9,%xmm5 pxor %xmm9,%xmm5
pshufd $147,%xmm1,%xmm11 pshufd $0x93,%xmm1,%xmm11
pxor %xmm10,%xmm3 pxor %xmm10,%xmm3
pshufd $147,%xmm6,%xmm12 pshufd $0x93,%xmm6,%xmm12
pxor %xmm11,%xmm1 pxor %xmm11,%xmm1
pshufd $147,%xmm2,%xmm13 pshufd $0x93,%xmm2,%xmm13
pxor %xmm12,%xmm6 pxor %xmm12,%xmm6
pshufd $147,%xmm4,%xmm14 pshufd $0x93,%xmm4,%xmm14
pxor %xmm13,%xmm2 pxor %xmm13,%xmm2
pxor %xmm14,%xmm4 pxor %xmm14,%xmm4
pxor %xmm15,%xmm8 pxor %xmm15,%xmm8
pxor %xmm4,%xmm7 pxor %xmm4,%xmm7
pxor %xmm4,%xmm8 pxor %xmm4,%xmm8
pshufd $78,%xmm15,%xmm15 pshufd $0x4E,%xmm15,%xmm15
pxor %xmm0,%xmm9 pxor %xmm0,%xmm9
pshufd $78,%xmm0,%xmm0 pshufd $0x4E,%xmm0,%xmm0
pxor %xmm1,%xmm12 pxor %xmm1,%xmm12
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pxor %xmm6,%xmm13 pxor %xmm6,%xmm13
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pxor %xmm3,%xmm11 pxor %xmm3,%xmm11
pshufd $78,%xmm1,%xmm7 pshufd $0x4E,%xmm1,%xmm7
pxor %xmm2,%xmm14 pxor %xmm2,%xmm14
pshufd $78,%xmm6,%xmm8 pshufd $0x4E,%xmm6,%xmm8
pxor %xmm5,%xmm10 pxor %xmm5,%xmm10
pshufd $78,%xmm3,%xmm1 pshufd $0x4E,%xmm3,%xmm1
pxor %xmm4,%xmm10 pxor %xmm4,%xmm10
pshufd $78,%xmm4,%xmm6 pshufd $0x4E,%xmm4,%xmm6
pxor %xmm4,%xmm11 pxor %xmm4,%xmm11
pshufd $78,%xmm2,%xmm3 pshufd $0x4E,%xmm2,%xmm3
pxor %xmm11,%xmm7 pxor %xmm11,%xmm7
pshufd $78,%xmm5,%xmm2 pshufd $0x4E,%xmm5,%xmm2
pxor %xmm12,%xmm8 pxor %xmm12,%xmm8
pxor %xmm1,%xmm10 pxor %xmm1,%xmm10
pxor %xmm14,%xmm6 pxor %xmm14,%xmm6
@ -1559,20 +1559,20 @@ bsaes_xts_encrypt:
movdqa %xmm7,(%rax) movdqa %xmm7,(%rax)
andq $-16,%r14 andq $-16,%r14
subq $128,%rsp subq $0x80,%rsp
movdqa 32(%rbp),%xmm6 movdqa 32(%rbp),%xmm6
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa .Lxts_magic(%rip),%xmm12 movdqa .Lxts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
subq $128,%r14 subq $0x80,%r14
jc .Lxts_enc_short jc .Lxts_enc_short
jmp .Lxts_enc_loop jmp .Lxts_enc_loop
.align 16 .align 16
.Lxts_enc_loop: .Lxts_enc_loop:
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -1580,7 +1580,7 @@ bsaes_xts_encrypt:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -1589,7 +1589,7 @@ bsaes_xts_encrypt:
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -1599,7 +1599,7 @@ bsaes_xts_encrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 16(%r12),%xmm8 movdqu 16(%r12),%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -1609,7 +1609,7 @@ bsaes_xts_encrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 32(%r12),%xmm9 movdqu 32(%r12),%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -1619,7 +1619,7 @@ bsaes_xts_encrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 48(%r12),%xmm10 movdqu 48(%r12),%xmm10
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -1629,7 +1629,7 @@ bsaes_xts_encrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 64(%r12),%xmm11 movdqu 64(%r12),%xmm11
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -1673,20 +1673,20 @@ bsaes_xts_encrypt:
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa .Lxts_magic(%rip),%xmm12 movdqa .Lxts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
paddq %xmm6,%xmm6 paddq %xmm6,%xmm6
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
subq $128,%r14 subq $0x80,%r14
jnc .Lxts_enc_loop jnc .Lxts_enc_loop
.Lxts_enc_short: .Lxts_enc_short:
addq $128,%r14 addq $0x80,%r14
jz .Lxts_enc_done jz .Lxts_enc_done
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -1694,7 +1694,7 @@ bsaes_xts_encrypt:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -1705,7 +1705,7 @@ bsaes_xts_encrypt:
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
cmpq $16,%r14 cmpq $16,%r14
je .Lxts_enc_1 je .Lxts_enc_1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -1717,7 +1717,7 @@ bsaes_xts_encrypt:
cmpq $32,%r14 cmpq $32,%r14
je .Lxts_enc_2 je .Lxts_enc_2
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -1729,7 +1729,7 @@ bsaes_xts_encrypt:
cmpq $48,%r14 cmpq $48,%r14
je .Lxts_enc_3 je .Lxts_enc_3
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -1741,7 +1741,7 @@ bsaes_xts_encrypt:
cmpq $64,%r14 cmpq $64,%r14
je .Lxts_enc_4 je .Lxts_enc_4
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -1753,7 +1753,7 @@ bsaes_xts_encrypt:
cmpq $80,%r14 cmpq $80,%r14
je .Lxts_enc_5 je .Lxts_enc_5
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -2019,20 +2019,20 @@ bsaes_xts_decrypt:
shlq $4,%rax shlq $4,%rax
subq %rax,%r14 subq %rax,%r14
subq $128,%rsp subq $0x80,%rsp
movdqa 32(%rbp),%xmm6 movdqa 32(%rbp),%xmm6
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa .Lxts_magic(%rip),%xmm12 movdqa .Lxts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
subq $128,%r14 subq $0x80,%r14
jc .Lxts_dec_short jc .Lxts_dec_short
jmp .Lxts_dec_loop jmp .Lxts_dec_loop
.align 16 .align 16
.Lxts_dec_loop: .Lxts_dec_loop:
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -2040,7 +2040,7 @@ bsaes_xts_decrypt:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -2049,7 +2049,7 @@ bsaes_xts_decrypt:
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -2059,7 +2059,7 @@ bsaes_xts_decrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 16(%r12),%xmm8 movdqu 16(%r12),%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -2069,7 +2069,7 @@ bsaes_xts_decrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 32(%r12),%xmm9 movdqu 32(%r12),%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -2079,7 +2079,7 @@ bsaes_xts_decrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 48(%r12),%xmm10 movdqu 48(%r12),%xmm10
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -2089,7 +2089,7 @@ bsaes_xts_decrypt:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 64(%r12),%xmm11 movdqu 64(%r12),%xmm11
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -2133,20 +2133,20 @@ bsaes_xts_decrypt:
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa .Lxts_magic(%rip),%xmm12 movdqa .Lxts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
paddq %xmm6,%xmm6 paddq %xmm6,%xmm6
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
subq $128,%r14 subq $0x80,%r14
jnc .Lxts_dec_loop jnc .Lxts_dec_loop
.Lxts_dec_short: .Lxts_dec_short:
addq $128,%r14 addq $0x80,%r14
jz .Lxts_dec_done jz .Lxts_dec_done
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -2154,7 +2154,7 @@ bsaes_xts_decrypt:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -2165,7 +2165,7 @@ bsaes_xts_decrypt:
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
cmpq $16,%r14 cmpq $16,%r14
je .Lxts_dec_1 je .Lxts_dec_1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -2177,7 +2177,7 @@ bsaes_xts_decrypt:
cmpq $32,%r14 cmpq $32,%r14
je .Lxts_dec_2 je .Lxts_dec_2
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -2189,7 +2189,7 @@ bsaes_xts_decrypt:
cmpq $48,%r14 cmpq $48,%r14
je .Lxts_dec_3 je .Lxts_dec_3
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -2201,7 +2201,7 @@ bsaes_xts_decrypt:
cmpq $64,%r14 cmpq $64,%r14
je .Lxts_dec_4 je .Lxts_dec_4
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -2213,7 +2213,7 @@ bsaes_xts_decrypt:
cmpq $80,%r14 cmpq $80,%r14
je .Lxts_dec_5 je .Lxts_dec_5
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -2390,7 +2390,7 @@ bsaes_xts_decrypt:
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa .Lxts_magic(%rip),%xmm12 movdqa .Lxts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
paddq %xmm6,%xmm6 paddq %xmm6,%xmm6
pand %xmm12,%xmm13 pand %xmm12,%xmm13


@ -61,7 +61,7 @@ _vpaes_encrypt_core:
addq $16,%r11 addq $16,%r11
pxor %xmm0,%xmm3 pxor %xmm0,%xmm3
.byte 102,15,56,0,193 .byte 102,15,56,0,193
andq $48,%r11 andq $0x30,%r11
subq $1,%rax subq $1,%rax
pxor %xmm3,%xmm0 pxor %xmm3,%xmm0
@ -121,10 +121,10 @@ _vpaes_decrypt_core:
pand %xmm9,%xmm0 pand %xmm9,%xmm0
.byte 102,15,56,0,208 .byte 102,15,56,0,208
movdqa .Lk_dipt+16(%rip),%xmm0 movdqa .Lk_dipt+16(%rip),%xmm0
xorq $48,%r11 xorq $0x30,%r11
leaq .Lk_dsbd(%rip),%r10 leaq .Lk_dsbd(%rip),%r10
.byte 102,15,56,0,193 .byte 102,15,56,0,193
andq $48,%r11 andq $0x30,%r11
pxor %xmm5,%xmm2 pxor %xmm5,%xmm2
movdqa .Lk_mc_forward+48(%rip),%xmm5 movdqa .Lk_mc_forward+48(%rip),%xmm5
pxor %xmm2,%xmm0 pxor %xmm2,%xmm0
@ -243,7 +243,7 @@ _vpaes_schedule_core:
movdqa (%r8,%r10,1),%xmm1 movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217 .byte 102,15,56,0,217
movdqu %xmm3,(%rdx) movdqu %xmm3,(%rdx)
xorq $48,%r8 xorq $0x30,%r8
.Lschedule_go: .Lschedule_go:
cmpl $192,%esi cmpl $192,%esi
@ -333,7 +333,7 @@ _vpaes_schedule_core:
call _vpaes_schedule_mangle call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0 pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5 movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7 movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round call _vpaes_schedule_low_round
@ -400,8 +400,8 @@ _vpaes_schedule_core:
.type _vpaes_schedule_192_smear,@function .type _vpaes_schedule_192_smear,@function
.align 16 .align 16
_vpaes_schedule_192_smear: _vpaes_schedule_192_smear:
pshufd $128,%xmm6,%xmm1 pshufd $0x80,%xmm6,%xmm1
pshufd $254,%xmm7,%xmm0 pshufd $0xFE,%xmm7,%xmm0
pxor %xmm1,%xmm6 pxor %xmm1,%xmm6
pxor %xmm1,%xmm1 pxor %xmm1,%xmm1
pxor %xmm0,%xmm6 pxor %xmm0,%xmm6
@ -438,7 +438,7 @@ _vpaes_schedule_round:
pxor %xmm1,%xmm7 pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0 pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1 .byte 102,15,58,15,192,1
@ -597,7 +597,7 @@ _vpaes_schedule_mangle:
movdqa (%r8,%r10,1),%xmm1 movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217 .byte 102,15,56,0,217
addq $-16,%r8 addq $-16,%r8
andq $48,%r8 andq $0x30,%r8
movdqu %xmm3,(%rdx) movdqu %xmm3,(%rdx)
.byte 0xf3,0xc3 .byte 0xf3,0xc3
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
@ -616,7 +616,7 @@ vpaes_set_encrypt_key:
movl %eax,240(%rdx) movl %eax,240(%rdx)
movl $0,%ecx movl $0,%ecx
movl $48,%r8d movl $0x30,%r8d
call _vpaes_schedule_core call _vpaes_schedule_core
xorl %eax,%eax xorl %eax,%eax
.byte 0xf3,0xc3 .byte 0xf3,0xc3


@ -466,48 +466,94 @@ rsaz_512_mul_gather4:
pushq %r14 pushq %r14
pushq %r15 pushq %r15
movl %r9d,%r9d subq $152,%rsp
subq $128+24,%rsp
.Lmul_gather4_body: .Lmul_gather4_body:
movl 64(%rdx,%r9,4),%eax movd %r9d,%xmm8
.byte 102,72,15,110,199 movdqa .Linc+16(%rip),%xmm1
movl (%rdx,%r9,4),%ebx movdqa .Linc(%rip),%xmm0
.byte 102,72,15,110,201
movq %r8,128(%rsp) pshufd $0,%xmm8,%xmm8
movdqa %xmm1,%xmm7
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm8,%xmm0
movdqa %xmm7,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm8,%xmm1
movdqa %xmm7,%xmm4
paddd %xmm2,%xmm3
pcmpeqd %xmm8,%xmm2
movdqa %xmm7,%xmm5
paddd %xmm3,%xmm4
pcmpeqd %xmm8,%xmm3
movdqa %xmm7,%xmm6
paddd %xmm4,%xmm5
pcmpeqd %xmm8,%xmm4
paddd %xmm5,%xmm6
pcmpeqd %xmm8,%xmm5
paddd %xmm6,%xmm7
pcmpeqd %xmm8,%xmm6
pcmpeqd %xmm8,%xmm7
movdqa 0(%rdx),%xmm8
movdqa 16(%rdx),%xmm9
movdqa 32(%rdx),%xmm10
movdqa 48(%rdx),%xmm11
pand %xmm0,%xmm8
movdqa 64(%rdx),%xmm12
pand %xmm1,%xmm9
movdqa 80(%rdx),%xmm13
pand %xmm2,%xmm10
movdqa 96(%rdx),%xmm14
pand %xmm3,%xmm11
movdqa 112(%rdx),%xmm15
leaq 128(%rdx),%rbp
pand %xmm4,%xmm12
pand %xmm5,%xmm13
pand %xmm6,%xmm14
pand %xmm7,%xmm15
por %xmm10,%xmm8
por %xmm11,%xmm9
por %xmm12,%xmm8
por %xmm13,%xmm9
por %xmm14,%xmm8
por %xmm15,%xmm9
por %xmm9,%xmm8
pshufd $0x4e,%xmm8,%xmm9
por %xmm9,%xmm8
.byte 102,76,15,126,195
movq %r8,128(%rsp)
movq %rdi,128+8(%rsp)
movq %rcx,128+16(%rsp)
shlq $32,%rax
orq %rax,%rbx
movq (%rsi),%rax movq (%rsi),%rax
movq 8(%rsi),%rcx movq 8(%rsi),%rcx
leaq 128(%rdx,%r9,4),%rbp
mulq %rbx mulq %rbx
movq %rax,(%rsp) movq %rax,(%rsp)
movq %rcx,%rax movq %rcx,%rax
movq %rdx,%r8 movq %rdx,%r8
mulq %rbx mulq %rbx
movd (%rbp),%xmm4
addq %rax,%r8 addq %rax,%r8
movq 16(%rsi),%rax movq 16(%rsi),%rax
movq %rdx,%r9 movq %rdx,%r9
adcq $0,%r9 adcq $0,%r9
mulq %rbx mulq %rbx
movd 64(%rbp),%xmm5
addq %rax,%r9 addq %rax,%r9
movq 24(%rsi),%rax movq 24(%rsi),%rax
movq %rdx,%r10 movq %rdx,%r10
adcq $0,%r10 adcq $0,%r10
mulq %rbx mulq %rbx
pslldq $4,%xmm5
addq %rax,%r10 addq %rax,%r10
movq 32(%rsi),%rax movq 32(%rsi),%rax
movq %rdx,%r11 movq %rdx,%r11
adcq $0,%r11 adcq $0,%r11
mulq %rbx mulq %rbx
por %xmm5,%xmm4
addq %rax,%r11 addq %rax,%r11
movq 40(%rsi),%rax movq 40(%rsi),%rax
movq %rdx,%r12 movq %rdx,%r12
@ -520,14 +566,12 @@ rsaz_512_mul_gather4:
adcq $0,%r13 adcq $0,%r13
mulq %rbx mulq %rbx
leaq 128(%rbp),%rbp
addq %rax,%r13 addq %rax,%r13
movq 56(%rsi),%rax movq 56(%rsi),%rax
movq %rdx,%r14 movq %rdx,%r14
adcq $0,%r14 adcq $0,%r14
mulq %rbx mulq %rbx
.byte 102,72,15,126,227
addq %rax,%r14 addq %rax,%r14
movq (%rsi),%rax movq (%rsi),%rax
movq %rdx,%r15 movq %rdx,%r15
@ -539,6 +583,35 @@ rsaz_512_mul_gather4:
.align 32 .align 32
.Loop_mul_gather: .Loop_mul_gather:
movdqa 0(%rbp),%xmm8
movdqa 16(%rbp),%xmm9
movdqa 32(%rbp),%xmm10
movdqa 48(%rbp),%xmm11
pand %xmm0,%xmm8
movdqa 64(%rbp),%xmm12
pand %xmm1,%xmm9
movdqa 80(%rbp),%xmm13
pand %xmm2,%xmm10
movdqa 96(%rbp),%xmm14
pand %xmm3,%xmm11
movdqa 112(%rbp),%xmm15
leaq 128(%rbp),%rbp
pand %xmm4,%xmm12
pand %xmm5,%xmm13
pand %xmm6,%xmm14
pand %xmm7,%xmm15
por %xmm10,%xmm8
por %xmm11,%xmm9
por %xmm12,%xmm8
por %xmm13,%xmm9
por %xmm14,%xmm8
por %xmm15,%xmm9
por %xmm9,%xmm8
pshufd $0x4e,%xmm8,%xmm9
por %xmm9,%xmm8
.byte 102,76,15,126,195
mulq %rbx mulq %rbx
addq %rax,%r8 addq %rax,%r8
movq 8(%rsi),%rax movq 8(%rsi),%rax
@ -547,7 +620,6 @@ rsaz_512_mul_gather4:
adcq $0,%r8 adcq $0,%r8
mulq %rbx mulq %rbx
movd (%rbp),%xmm4
addq %rax,%r9 addq %rax,%r9
movq 16(%rsi),%rax movq 16(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -556,7 +628,6 @@ rsaz_512_mul_gather4:
adcq $0,%r9 adcq $0,%r9
mulq %rbx mulq %rbx
movd 64(%rbp),%xmm5
addq %rax,%r10 addq %rax,%r10
movq 24(%rsi),%rax movq 24(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -565,7 +636,6 @@ rsaz_512_mul_gather4:
adcq $0,%r10 adcq $0,%r10
mulq %rbx mulq %rbx
pslldq $4,%xmm5
addq %rax,%r11 addq %rax,%r11
movq 32(%rsi),%rax movq 32(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -574,7 +644,6 @@ rsaz_512_mul_gather4:
adcq $0,%r11 adcq $0,%r11
mulq %rbx mulq %rbx
por %xmm5,%xmm4
addq %rax,%r12 addq %rax,%r12
movq 40(%rsi),%rax movq 40(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -599,7 +668,6 @@ rsaz_512_mul_gather4:
adcq $0,%r14 adcq $0,%r14
mulq %rbx mulq %rbx
.byte 102,72,15,126,227
addq %rax,%r15 addq %rax,%r15
movq (%rsi),%rax movq (%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -607,7 +675,6 @@ rsaz_512_mul_gather4:
movq %rdx,%r15 movq %rdx,%r15
adcq $0,%r15 adcq $0,%r15
leaq 128(%rbp),%rbp
leaq 8(%rdi),%rdi leaq 8(%rdi),%rdi
decl %ecx decl %ecx
@ -622,8 +689,8 @@ rsaz_512_mul_gather4:
movq %r14,48(%rdi) movq %r14,48(%rdi)
movq %r15,56(%rdi) movq %r15,56(%rdi)
.byte 102,72,15,126,199 movq 128+8(%rsp),%rdi
.byte 102,72,15,126,205 movq 128+16(%rsp),%rbp
movq (%rsp),%r8 movq (%rsp),%r8
movq 8(%rsp),%r9 movq 8(%rsp),%r9
@ -673,7 +740,7 @@ rsaz_512_mul_scatter4:
movl %r9d,%r9d movl %r9d,%r9d
subq $128+24,%rsp subq $128+24,%rsp
.Lmul_scatter4_body: .Lmul_scatter4_body:
leaq (%r8,%r9,4),%r8 leaq (%r8,%r9,8),%r8
.byte 102,72,15,110,199 .byte 102,72,15,110,199
.byte 102,72,15,110,202 .byte 102,72,15,110,202
.byte 102,73,15,110,208 .byte 102,73,15,110,208
@ -709,30 +776,14 @@ rsaz_512_mul_scatter4:
call __rsaz_512_subtract call __rsaz_512_subtract
movl %r8d,0(%rsi) movq %r8,0(%rsi)
shrq $32,%r8 movq %r9,128(%rsi)
movl %r9d,128(%rsi) movq %r10,256(%rsi)
shrq $32,%r9 movq %r11,384(%rsi)
movl %r10d,256(%rsi) movq %r12,512(%rsi)
shrq $32,%r10 movq %r13,640(%rsi)
movl %r11d,384(%rsi) movq %r14,768(%rsi)
shrq $32,%r11 movq %r15,896(%rsi)
movl %r12d,512(%rsi)
shrq $32,%r12
movl %r13d,640(%rsi)
shrq $32,%r13
movl %r14d,768(%rsi)
shrq $32,%r14
movl %r15d,896(%rsi)
shrq $32,%r15
movl %r8d,64(%rsi)
movl %r9d,192(%rsi)
movl %r10d,320(%rsi)
movl %r11d,448(%rsi)
movl %r12d,576(%rsi)
movl %r13d,704(%rsi)
movl %r14d,832(%rsi)
movl %r15d,960(%rsi)
leaq 128+24+48(%rsp),%rax leaq 128+24+48(%rsp),%rax
movq -48(%rax),%r15 movq -48(%rax),%r15
@ -1087,16 +1138,14 @@ __rsaz_512_mul:
.type rsaz_512_scatter4,@function .type rsaz_512_scatter4,@function
.align 16 .align 16
rsaz_512_scatter4: rsaz_512_scatter4:
leaq (%rdi,%rdx,4),%rdi leaq (%rdi,%rdx,8),%rdi
movl $8,%r9d movl $8,%r9d
jmp .Loop_scatter jmp .Loop_scatter
.align 16 .align 16
.Loop_scatter: .Loop_scatter:
movq (%rsi),%rax movq (%rsi),%rax
leaq 8(%rsi),%rsi leaq 8(%rsi),%rsi
movl %eax,(%rdi) movq %rax,(%rdi)
shrq $32,%rax
movl %eax,64(%rdi)
leaq 128(%rdi),%rdi leaq 128(%rdi),%rdi
decl %r9d decl %r9d
jnz .Loop_scatter jnz .Loop_scatter
@ -1108,20 +1157,73 @@ rsaz_512_scatter4:
.type rsaz_512_gather4,@function .type rsaz_512_gather4,@function
.align 16 .align 16
rsaz_512_gather4: rsaz_512_gather4:
leaq (%rsi,%rdx,4),%rsi movd %edx,%xmm8
movdqa .Linc+16(%rip),%xmm1
movdqa .Linc(%rip),%xmm0
pshufd $0,%xmm8,%xmm8
movdqa %xmm1,%xmm7
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm8,%xmm0
movdqa %xmm7,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm8,%xmm1
movdqa %xmm7,%xmm4
paddd %xmm2,%xmm3
pcmpeqd %xmm8,%xmm2
movdqa %xmm7,%xmm5
paddd %xmm3,%xmm4
pcmpeqd %xmm8,%xmm3
movdqa %xmm7,%xmm6
paddd %xmm4,%xmm5
pcmpeqd %xmm8,%xmm4
paddd %xmm5,%xmm6
pcmpeqd %xmm8,%xmm5
paddd %xmm6,%xmm7
pcmpeqd %xmm8,%xmm6
pcmpeqd %xmm8,%xmm7
movl $8,%r9d movl $8,%r9d
jmp .Loop_gather jmp .Loop_gather
.align 16 .align 16
.Loop_gather: .Loop_gather:
movl (%rsi),%eax movdqa 0(%rsi),%xmm8
movl 64(%rsi),%r8d movdqa 16(%rsi),%xmm9
movdqa 32(%rsi),%xmm10
movdqa 48(%rsi),%xmm11
pand %xmm0,%xmm8
movdqa 64(%rsi),%xmm12
pand %xmm1,%xmm9
movdqa 80(%rsi),%xmm13
pand %xmm2,%xmm10
movdqa 96(%rsi),%xmm14
pand %xmm3,%xmm11
movdqa 112(%rsi),%xmm15
leaq 128(%rsi),%rsi leaq 128(%rsi),%rsi
shlq $32,%r8 pand %xmm4,%xmm12
orq %r8,%rax pand %xmm5,%xmm13
movq %rax,(%rdi) pand %xmm6,%xmm14
pand %xmm7,%xmm15
por %xmm10,%xmm8
por %xmm11,%xmm9
por %xmm12,%xmm8
por %xmm13,%xmm9
por %xmm14,%xmm8
por %xmm15,%xmm9
por %xmm9,%xmm8
pshufd $0x4e,%xmm8,%xmm9
por %xmm9,%xmm8
movq %xmm8,(%rdi)
leaq 8(%rdi),%rdi leaq 8(%rdi),%rdi
decl %r9d decl %r9d
jnz .Loop_gather jnz .Loop_gather
.byte 0xf3,0xc3 .byte 0xf3,0xc3
.LSEH_end_rsaz_512_gather4:
.size rsaz_512_gather4,.-rsaz_512_gather4 .size rsaz_512_gather4,.-rsaz_512_gather4
.align 64
.Linc:
.long 0,0, 1,1
.long 2,2, 2,2
#endif #endif


@ -636,20 +636,20 @@ bn_sqr8x_mont:
leaq -64(%rsp,%r9,4),%r11 leaq -64(%rsp,%r9,2),%r11
movq (%r8),%r8 movq (%r8),%r8
subq %rsi,%r11 subq %rsi,%r11
andq $4095,%r11 andq $4095,%r11
cmpq %r11,%r10 cmpq %r11,%r10
jb .Lsqr8x_sp_alt jb .Lsqr8x_sp_alt
subq %r11,%rsp subq %r11,%rsp
leaq -64(%rsp,%r9,4),%rsp leaq -64(%rsp,%r9,2),%rsp
jmp .Lsqr8x_sp_done jmp .Lsqr8x_sp_done
.align 32 .align 32
.Lsqr8x_sp_alt: .Lsqr8x_sp_alt:
leaq 4096-64(,%r9,4),%r10 leaq 4096-64(,%r9,2),%r10
leaq -64(%rsp,%r9,4),%rsp leaq -64(%rsp,%r9,2),%rsp
subq %r10,%r11 subq %r10,%r11
movq $0,%r10 movq $0,%r10
cmovcq %r10,%r11 cmovcq %r10,%r11
@ -659,58 +659,80 @@ bn_sqr8x_mont:
movq %r9,%r10 movq %r9,%r10
negq %r9 negq %r9
leaq 64(%rsp,%r9,2),%r11
movq %r8,32(%rsp) movq %r8,32(%rsp)
movq %rax,40(%rsp) movq %rax,40(%rsp)
.Lsqr8x_body: .Lsqr8x_body:
movq %r9,%rbp .byte 102,72,15,110,209
.byte 102,73,15,110,211
shrq $3+2,%rbp
movl OPENSSL_ia32cap_P+8(%rip),%eax
jmp .Lsqr8x_copy_n
.align 32
.Lsqr8x_copy_n:
movq 0(%rcx),%xmm0
movq 8(%rcx),%xmm1
movq 16(%rcx),%xmm3
movq 24(%rcx),%xmm4
leaq 32(%rcx),%rcx
movdqa %xmm0,0(%r11)
movdqa %xmm1,16(%r11)
movdqa %xmm3,32(%r11)
movdqa %xmm4,48(%r11)
leaq 64(%r11),%r11
decq %rbp
jnz .Lsqr8x_copy_n
pxor %xmm0,%xmm0 pxor %xmm0,%xmm0
.byte 102,72,15,110,207 .byte 102,72,15,110,207
.byte 102,73,15,110,218 .byte 102,73,15,110,218
call bn_sqr8x_internal call bn_sqr8x_internal
pxor %xmm0,%xmm0
leaq 48(%rsp),%rax
leaq 64(%rsp,%r9,2),%rdx
shrq $3+2,%r9 leaq (%rdi,%r9,1),%rbx
movq 40(%rsp),%rsi movq %r9,%rcx
jmp .Lsqr8x_zero movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp .Lsqr8x_sub
.align 32 .align 32
.Lsqr8x_zero: .Lsqr8x_sub:
movdqa %xmm0,0(%rax) movq 0(%rbx),%r12
movdqa %xmm0,16(%rax) movq 8(%rbx),%r13
movdqa %xmm0,32(%rax) movq 16(%rbx),%r14
movdqa %xmm0,48(%rax) movq 24(%rbx),%r15
leaq 64(%rax),%rax leaq 32(%rbx),%rbx
movdqa %xmm0,0(%rdx) sbbq 0(%rbp),%r12
movdqa %xmm0,16(%rdx) sbbq 8(%rbp),%r13
movdqa %xmm0,32(%rdx) sbbq 16(%rbp),%r14
movdqa %xmm0,48(%rdx) sbbq 24(%rbp),%r15
leaq 64(%rdx),%rdx leaq 32(%rbp),%rbp
decq %r9 movq %r12,0(%rdi)
jnz .Lsqr8x_zero movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz .Lsqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp .Lsqr8x_cond_copy
.align 32
.Lsqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz .Lsqr8x_cond_copy
movq $1,%rax movq $1,%rax
movq -48(%rsi),%r15 movq -48(%rsi),%r15

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -495,14 +495,14 @@ md5_block_asm_data_order:
movl %ecx,%r11d movl %ecx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
movl 0(%rsi),%r10d movl 0(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
xorl %edx,%r11d xorl %edx,%r11d
leal -198630844(%rax,%r10,1),%eax leal -198630844(%rax,%r10,1),%eax
orl %ebx,%r11d orl %ebx,%r11d
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 28(%rsi),%r10d movl 28(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -511,7 +511,7 @@ md5_block_asm_data_order:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 56(%rsi),%r10d movl 56(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -520,7 +520,7 @@ md5_block_asm_data_order:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 20(%rsi),%r10d movl 20(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -529,7 +529,7 @@ md5_block_asm_data_order:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 48(%rsi),%r10d movl 48(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
@ -538,7 +538,7 @@ md5_block_asm_data_order:
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 12(%rsi),%r10d movl 12(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -547,7 +547,7 @@ md5_block_asm_data_order:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 40(%rsi),%r10d movl 40(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -556,7 +556,7 @@ md5_block_asm_data_order:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 4(%rsi),%r10d movl 4(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -565,7 +565,7 @@ md5_block_asm_data_order:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 32(%rsi),%r10d movl 32(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
@ -574,7 +574,7 @@ md5_block_asm_data_order:
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 60(%rsi),%r10d movl 60(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -583,7 +583,7 @@ md5_block_asm_data_order:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 24(%rsi),%r10d movl 24(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -592,7 +592,7 @@ md5_block_asm_data_order:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 52(%rsi),%r10d movl 52(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -601,7 +601,7 @@ md5_block_asm_data_order:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 16(%rsi),%r10d movl 16(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
@ -610,7 +610,7 @@ md5_block_asm_data_order:
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 44(%rsi),%r10d movl 44(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -619,7 +619,7 @@ md5_block_asm_data_order:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 8(%rsi),%r10d movl 8(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -628,7 +628,7 @@ md5_block_asm_data_order:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 36(%rsi),%r10d movl 36(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -637,7 +637,7 @@ md5_block_asm_data_order:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 0(%rsi),%r10d movl 0(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx


@ -23,14 +23,14 @@ gcm_gmult_4bit:
movq $14,%rcx movq $14,%rcx
movq 8(%rsi,%rax,1),%r8 movq 8(%rsi,%rax,1),%r8
movq (%rsi,%rax,1),%r9 movq (%rsi,%rax,1),%r9
andb $240,%bl andb $0xf0,%bl
movq %r8,%rdx movq %r8,%rdx
jmp .Loop1 jmp .Loop1
.align 16 .align 16
.Loop1: .Loop1:
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
movb (%rdi,%rcx,1),%al movb (%rdi,%rcx,1),%al
shrq $4,%r9 shrq $4,%r9
@ -46,13 +46,13 @@ gcm_gmult_4bit:
js .Lbreak1 js .Lbreak1
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
shrq $4,%r9 shrq $4,%r9
xorq 8(%rsi,%rax,1),%r8 xorq 8(%rsi,%rax,1),%r8
shlq $60,%r10 shlq $60,%r10
xorq (%rsi,%rax,1),%r9 xorq (%rsi,%rax,1),%r9
andb $240,%bl andb $0xf0,%bl
xorq (%r11,%rdx,8),%r9 xorq (%r11,%rdx,8),%r9
movq %r8,%rdx movq %r8,%rdx
xorq %r10,%r8 xorq %r10,%r8
@ -61,19 +61,19 @@ gcm_gmult_4bit:
.align 16 .align 16
.Lbreak1: .Lbreak1:
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
shrq $4,%r9 shrq $4,%r9
xorq 8(%rsi,%rax,1),%r8 xorq 8(%rsi,%rax,1),%r8
shlq $60,%r10 shlq $60,%r10
xorq (%rsi,%rax,1),%r9 xorq (%rsi,%rax,1),%r9
andb $240,%bl andb $0xf0,%bl
xorq (%r11,%rdx,8),%r9 xorq (%r11,%rdx,8),%r9
movq %r8,%rdx movq %r8,%rdx
xorq %r10,%r8 xorq %r10,%r8
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
shrq $4,%r9 shrq $4,%r9
xorq 8(%rsi,%rbx,1),%r8 xorq 8(%rsi,%rbx,1),%r8
@ -881,20 +881,20 @@ gcm_ghash_clmul:
movdqu 32(%rsi),%xmm7 movdqu 32(%rsi),%xmm7
.byte 102,65,15,56,0,194 .byte 102,65,15,56,0,194
subq $16,%rcx subq $0x10,%rcx
jz .Lodd_tail jz .Lodd_tail
movdqu 16(%rsi),%xmm6 movdqu 16(%rsi),%xmm6
movl OPENSSL_ia32cap_P+4(%rip),%eax movl OPENSSL_ia32cap_P+4(%rip),%eax
cmpq $48,%rcx cmpq $0x30,%rcx
jb .Lskip4x jb .Lskip4x
andl $71303168,%eax andl $71303168,%eax
cmpl $4194304,%eax cmpl $4194304,%eax
je .Lskip4x je .Lskip4x
subq $48,%rcx subq $0x30,%rcx
movq $11547335547999543296,%rax movq $0xA040608020C0E000,%rax
movdqu 48(%rsi),%xmm14 movdqu 48(%rsi),%xmm14
movdqu 64(%rsi),%xmm15 movdqu 64(%rsi),%xmm15
@ -941,7 +941,7 @@ gcm_ghash_clmul:
xorps %xmm13,%xmm5 xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx leaq 64(%rdx),%rdx
subq $64,%rcx subq $0x40,%rcx
jc .Ltail4x jc .Ltail4x
jmp .Lmod4_loop jmp .Lmod4_loop
@ -1024,7 +1024,7 @@ gcm_ghash_clmul:
xorps %xmm13,%xmm5 xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx leaq 64(%rdx),%rdx
subq $64,%rcx subq $0x40,%rcx
jnc .Lmod4_loop jnc .Lmod4_loop
.Ltail4x: .Ltail4x:
@ -1068,10 +1068,10 @@ gcm_ghash_clmul:
pxor %xmm4,%xmm0 pxor %xmm4,%xmm0
psrlq $1,%xmm0 psrlq $1,%xmm0
pxor %xmm1,%xmm0 pxor %xmm1,%xmm0
addq $64,%rcx addq $0x40,%rcx
jz .Ldone jz .Ldone
movdqu 32(%rsi),%xmm7 movdqu 32(%rsi),%xmm7
subq $16,%rcx subq $0x10,%rcx
jz .Lodd_tail jz .Lodd_tail
.Lskip4x: .Lskip4x:
@ -1094,7 +1094,7 @@ gcm_ghash_clmul:
leaq 32(%rdx),%rdx leaq 32(%rdx),%rdx
nop nop
subq $32,%rcx subq $0x20,%rcx
jbe .Leven_tail jbe .Leven_tail
nop nop
jmp .Lmod_loop jmp .Lmod_loop
@ -1157,7 +1157,7 @@ gcm_ghash_clmul:
.byte 102,15,58,68,231,0 .byte 102,15,58,68,231,0
pxor %xmm1,%xmm0 pxor %xmm1,%xmm0
subq $32,%rcx subq $0x20,%rcx
ja .Lmod_loop ja .Lmod_loop
.Leven_tail: .Leven_tail:

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,969 @@
#if defined(__i386__)
.file "chacha-x86.S"
.text
.globl _ChaCha20_ctr32
.private_extern _ChaCha20_ctr32
.align 4
_ChaCha20_ctr32:
L_ChaCha20_ctr32_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
cmpl 28(%esp),%eax
je L000no_data
call Lpic_point
Lpic_point:
popl %eax
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-Lpic_point(%eax),%ebp
testl $16777216,(%ebp)
jz L001x86
testl $512,4(%ebp)
jz L001x86
jmp Lssse3_shortcut
L001x86:
movl 32(%esp),%esi
movl 36(%esp),%edi
subl $132,%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
movl %eax,80(%esp)
movl %ebx,84(%esp)
movl %ecx,88(%esp)
movl %edx,92(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
movl %eax,96(%esp)
movl %ebx,100(%esp)
movl %ecx,104(%esp)
movl %edx,108(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
subl $1,%eax
movl %eax,112(%esp)
movl %ebx,116(%esp)
movl %ecx,120(%esp)
movl %edx,124(%esp)
jmp L002entry
.align 4,0x90
L003outer_loop:
movl %ebx,156(%esp)
movl %eax,152(%esp)
movl %ecx,160(%esp)
L002entry:
movl $1634760805,%eax
movl $857760878,4(%esp)
movl $2036477234,8(%esp)
movl $1797285236,12(%esp)
movl 84(%esp),%ebx
movl 88(%esp),%ebp
movl 104(%esp),%ecx
movl 108(%esp),%esi
movl 116(%esp),%edx
movl 120(%esp),%edi
movl %ebx,20(%esp)
movl %ebp,24(%esp)
movl %ecx,40(%esp)
movl %esi,44(%esp)
movl %edx,52(%esp)
movl %edi,56(%esp)
movl 92(%esp),%ebx
movl 124(%esp),%edi
movl 112(%esp),%edx
movl 80(%esp),%ebp
movl 96(%esp),%ecx
movl 100(%esp),%esi
addl $1,%edx
movl %ebx,28(%esp)
movl %edi,60(%esp)
movl %edx,112(%esp)
movl $10,%ebx
jmp L004loop
.align 4,0x90
L004loop:
addl %ebp,%eax
movl %ebx,128(%esp)
movl %ebp,%ebx
xorl %eax,%edx
roll $16,%edx
addl %edx,%ecx
xorl %ecx,%ebx
movl 52(%esp),%edi
roll $12,%ebx
movl 20(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,48(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,32(%esp)
roll $16,%edi
movl %ebx,16(%esp)
addl %edi,%esi
movl 40(%esp),%ecx
xorl %esi,%ebp
movl 56(%esp),%edx
roll $12,%ebp
movl 24(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,52(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,36(%esp)
roll $16,%edx
movl %ebp,20(%esp)
addl %edx,%ecx
movl 44(%esp),%esi
xorl %ecx,%ebx
movl 60(%esp),%edi
roll $12,%ebx
movl 28(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,56(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,24(%esp)
addl %edi,%esi
xorl %esi,%ebp
roll $12,%ebp
movl 20(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,%edx
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
roll $16,%edx
movl %ebp,28(%esp)
addl %edx,%ecx
xorl %ecx,%ebx
movl 48(%esp),%edi
roll $12,%ebx
movl 24(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,60(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,40(%esp)
roll $16,%edi
movl %ebx,20(%esp)
addl %edi,%esi
movl 32(%esp),%ecx
xorl %esi,%ebp
movl 52(%esp),%edx
roll $12,%ebp
movl 28(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,48(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,44(%esp)
roll $16,%edx
movl %ebp,24(%esp)
addl %edx,%ecx
movl 36(%esp),%esi
xorl %ecx,%ebx
movl 56(%esp),%edi
roll $12,%ebx
movl 16(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,52(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,28(%esp)
addl %edi,%esi
xorl %esi,%ebp
movl 48(%esp),%edx
roll $12,%ebp
movl 128(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,56(%esp)
xorl %esi,%ebp
roll $7,%ebp
decl %ebx
jnz L004loop
movl 160(%esp),%ebx
addl $1634760805,%eax
addl 80(%esp),%ebp
addl 96(%esp),%ecx
addl 100(%esp),%esi
cmpl $64,%ebx
jb L005tail
movl 156(%esp),%ebx
addl 112(%esp),%edx
addl 120(%esp),%edi
xorl (%ebx),%eax
xorl 16(%ebx),%ebp
movl %eax,(%esp)
movl 152(%esp),%eax
xorl 32(%ebx),%ecx
xorl 36(%ebx),%esi
xorl 48(%ebx),%edx
xorl 56(%ebx),%edi
movl %ebp,16(%eax)
movl %ecx,32(%eax)
movl %esi,36(%eax)
movl %edx,48(%eax)
movl %edi,56(%eax)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
xorl 4(%ebx),%ebp
xorl 8(%ebx),%ecx
xorl 12(%ebx),%esi
xorl 20(%ebx),%edx
xorl 24(%ebx),%edi
movl %ebp,4(%eax)
movl %ecx,8(%eax)
movl %esi,12(%eax)
movl %edx,20(%eax)
movl %edi,24(%eax)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
xorl 28(%ebx),%ebp
xorl 40(%ebx),%ecx
xorl 44(%ebx),%esi
xorl 52(%ebx),%edx
xorl 60(%ebx),%edi
leal 64(%ebx),%ebx
movl %ebp,28(%eax)
movl (%esp),%ebp
movl %ecx,40(%eax)
movl 160(%esp),%ecx
movl %esi,44(%eax)
movl %edx,52(%eax)
movl %edi,60(%eax)
movl %ebp,(%eax)
leal 64(%eax),%eax
subl $64,%ecx
jnz L003outer_loop
jmp L006done
L005tail:
addl 112(%esp),%edx
addl 120(%esp),%edi
movl %eax,(%esp)
movl %ebp,16(%esp)
movl %ecx,32(%esp)
movl %esi,36(%esp)
movl %edx,48(%esp)
movl %edi,56(%esp)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
movl %ebp,4(%esp)
movl %ecx,8(%esp)
movl %esi,12(%esp)
movl %edx,20(%esp)
movl %edi,24(%esp)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
movl %ebp,28(%esp)
movl 156(%esp),%ebp
movl %ecx,40(%esp)
movl 152(%esp),%ecx
movl %esi,44(%esp)
xorl %esi,%esi
movl %edx,52(%esp)
movl %edi,60(%esp)
xorl %eax,%eax
xorl %edx,%edx
L007tail_loop:
movb (%esi,%ebp,1),%al
movb (%esp,%esi,1),%dl
leal 1(%esi),%esi
xorb %dl,%al
movb %al,-1(%ecx,%esi,1)
decl %ebx
jnz L007tail_loop
L006done:
addl $132,%esp
L000no_data:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.globl _ChaCha20_ssse3
.private_extern _ChaCha20_ssse3
.align 4
_ChaCha20_ssse3:
L_ChaCha20_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
Lssse3_shortcut:
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%ecx
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $524,%esp
andl $-64,%esp
movl %ebp,512(%esp)
leal Lssse3_data-Lpic_point(%eax),%eax
movdqu (%ebx),%xmm3
cmpl $256,%ecx
jb L0081x
movl %edx,516(%esp)
movl %ebx,520(%esp)
subl $256,%ecx
leal 384(%esp),%ebp
movdqu (%edx),%xmm7
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
paddd 48(%eax),%xmm0
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
psubd 64(%eax),%xmm0
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,64(%ebp)
movdqa %xmm1,80(%ebp)
movdqa %xmm2,96(%ebp)
movdqa %xmm3,112(%ebp)
movdqu 16(%edx),%xmm3
movdqa %xmm4,-64(%ebp)
movdqa %xmm5,-48(%ebp)
movdqa %xmm6,-32(%ebp)
movdqa %xmm7,-16(%ebp)
movdqa 32(%eax),%xmm7
leal 128(%esp),%ebx
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,(%ebp)
movdqa %xmm1,16(%ebp)
movdqa %xmm2,32(%ebp)
movdqa %xmm3,48(%ebp)
movdqa %xmm4,-128(%ebp)
movdqa %xmm5,-112(%ebp)
movdqa %xmm6,-96(%ebp)
movdqa %xmm7,-80(%ebp)
leal 128(%esi),%esi
leal 128(%edi),%edi
jmp L009outer_loop
.align 4,0x90
L009outer_loop:
movdqa -112(%ebp),%xmm1
movdqa -96(%ebp),%xmm2
movdqa -80(%ebp),%xmm3
movdqa -48(%ebp),%xmm5
movdqa -32(%ebp),%xmm6
movdqa -16(%ebp),%xmm7
movdqa %xmm1,-112(%ebx)
movdqa %xmm2,-96(%ebx)
movdqa %xmm3,-80(%ebx)
movdqa %xmm5,-48(%ebx)
movdqa %xmm6,-32(%ebx)
movdqa %xmm7,-16(%ebx)
movdqa 32(%ebp),%xmm2
movdqa 48(%ebp),%xmm3
movdqa 64(%ebp),%xmm4
movdqa 80(%ebp),%xmm5
movdqa 96(%ebp),%xmm6
movdqa 112(%ebp),%xmm7
paddd 64(%eax),%xmm4
movdqa %xmm2,32(%ebx)
movdqa %xmm3,48(%ebx)
movdqa %xmm4,64(%ebx)
movdqa %xmm5,80(%ebx)
movdqa %xmm6,96(%ebx)
movdqa %xmm7,112(%ebx)
movdqa %xmm4,64(%ebp)
movdqa -128(%ebp),%xmm0
movdqa %xmm4,%xmm6
movdqa -64(%ebp),%xmm3
movdqa (%ebp),%xmm4
movdqa 16(%ebp),%xmm5
movl $10,%edx
nop
.align 4,0x90
L010loop:
paddd %xmm3,%xmm0
movdqa %xmm3,%xmm2
pxor %xmm0,%xmm6
pshufb (%eax),%xmm6
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -48(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 80(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,64(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-64(%ebx)
paddd %xmm7,%xmm5
movdqa 32(%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -32(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 96(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,80(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,16(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-48(%ebx)
paddd %xmm6,%xmm4
movdqa 48(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -16(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 112(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,96(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-32(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa -48(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,%xmm6
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
pshufb (%eax),%xmm6
movdqa %xmm3,-16(%ebx)
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -32(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 64(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,112(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,32(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-48(%ebx)
paddd %xmm7,%xmm5
movdqa (%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -16(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 80(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,64(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,48(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-32(%ebx)
paddd %xmm6,%xmm4
movdqa 16(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -64(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 96(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,80(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-16(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 64(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,96(%ebx)
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
por %xmm1,%xmm3
decl %edx
jnz L010loop
movdqa %xmm3,-64(%ebx)
movdqa %xmm4,(%ebx)
movdqa %xmm5,16(%ebx)
movdqa %xmm6,64(%ebx)
movdqa %xmm7,96(%ebx)
movdqa -112(%ebx),%xmm1
movdqa -96(%ebx),%xmm2
movdqa -80(%ebx),%xmm3
paddd -128(%ebp),%xmm0
paddd -112(%ebp),%xmm1
paddd -96(%ebp),%xmm2
paddd -80(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa -64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa -48(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa -32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa -16(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd -64(%ebp),%xmm0
paddd -48(%ebp),%xmm1
paddd -32(%ebp),%xmm2
paddd -16(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa (%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 16(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 48(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd (%ebp),%xmm0
paddd 16(%ebp),%xmm1
paddd 32(%ebp),%xmm2
paddd 48(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa 64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 80(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 96(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 112(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd 64(%ebp),%xmm0
paddd 80(%ebp),%xmm1
paddd 96(%ebp),%xmm2
paddd 112(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 208(%esi),%esi
pxor %xmm0,%xmm4
pxor %xmm1,%xmm5
pxor %xmm2,%xmm6
pxor %xmm3,%xmm7
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 208(%edi),%edi
subl $256,%ecx
jnc L009outer_loop
addl $256,%ecx
jz L011done
movl 520(%esp),%ebx
leal -128(%esi),%esi
movl 516(%esp),%edx
leal -128(%edi),%edi
movd 64(%ebp),%xmm2
movdqu (%ebx),%xmm3
paddd 96(%eax),%xmm2
pand 112(%eax),%xmm3
por %xmm2,%xmm3
L0081x:
movdqa 32(%eax),%xmm0
movdqu (%edx),%xmm1
movdqu 16(%edx),%xmm2
movdqa (%eax),%xmm6
movdqa 16(%eax),%xmm7
movl %ebp,48(%esp)
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
movl $10,%edx
jmp L012loop1x
.align 4,0x90
L013outer1x:
movdqa 80(%eax),%xmm3
movdqa (%esp),%xmm0
movdqa 16(%esp),%xmm1
movdqa 32(%esp),%xmm2
paddd 48(%esp),%xmm3
movl $10,%edx
movdqa %xmm3,48(%esp)
jmp L012loop1x
.align 4,0x90
L012loop1x:
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $57,%xmm1,%xmm1
pshufd $147,%xmm3,%xmm3
nop
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $147,%xmm1,%xmm1
pshufd $57,%xmm3,%xmm3
decl %edx
jnz L012loop1x
paddd (%esp),%xmm0
paddd 16(%esp),%xmm1
paddd 32(%esp),%xmm2
paddd 48(%esp),%xmm3
cmpl $64,%ecx
jb L014tail
movdqu (%esi),%xmm4
movdqu 16(%esi),%xmm5
pxor %xmm4,%xmm0
movdqu 32(%esi),%xmm4
pxor %xmm5,%xmm1
movdqu 48(%esi),%xmm5
pxor %xmm4,%xmm2
pxor %xmm5,%xmm3
leal 64(%esi),%esi
movdqu %xmm0,(%edi)
movdqu %xmm1,16(%edi)
movdqu %xmm2,32(%edi)
movdqu %xmm3,48(%edi)
leal 64(%edi),%edi
subl $64,%ecx
jnz L013outer1x
jmp L011done
L014tail:
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
xorl %eax,%eax
xorl %edx,%edx
xorl %ebp,%ebp
L015tail_loop:
movb (%esp,%ebp,1),%al
movb (%esi,%ebp,1),%dl
leal 1(%ebp),%ebp
xorb %dl,%al
movb %al,-1(%edi,%ebp,1)
decl %ecx
jnz L015tail_loop
L011done:
movl 512(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 6,0x90
Lssse3_data:
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
.long 1634760805,857760878,2036477234,1797285236
.long 0,1,2,3
.long 4,4,4,4
.long 1,0,0,0
.long 4,0,0,0
.long 0,-1,-1,-1
.align 6,0x90
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
.byte 114,103,62,0
.section __IMPORT,__pointers,non_lazy_symbol_pointers
L_OPENSSL_ia32cap_P$non_lazy_ptr:
.indirect_symbol _OPENSSL_ia32cap_P
.long 0
#endif

View File

@ -343,39 +343,6 @@ L015exit:
popl %ebx popl %ebx
popl %ebp popl %ebp
ret ret
.globl _RC4_options
.private_extern _RC4_options
.align 4
_RC4_options:
L_RC4_options_begin:
call L018pic_point
L018pic_point:
popl %eax
leal L019opts-L018pic_point(%eax),%eax
call L020PIC_me_up
L020PIC_me_up:
popl %edx
movl L_OPENSSL_ia32cap_P$non_lazy_ptr-L020PIC_me_up(%edx),%edx
movl (%edx),%edx
btl $20,%edx
jc L0211xchar
btl $26,%edx
jnc L022ret
addl $25,%eax
ret
L0211xchar:
addl $12,%eax
L022ret:
ret
.align 6,0x90
L019opts:
.byte 114,99,52,40,52,120,44,105,110,116,41,0
.byte 114,99,52,40,49,120,44,99,104,97,114,41,0
.byte 114,99,52,40,56,120,44,109,109,120,41,0
.byte 82,67,52,32,102,111,114,32,120,56,54,44,32,67,82,89
.byte 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114
.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 6,0x90
.section __IMPORT,__pointers,non_lazy_symbol_pointers .section __IMPORT,__pointers,non_lazy_symbol_pointers
L_OPENSSL_ia32cap_P$non_lazy_ptr: L_OPENSSL_ia32cap_P$non_lazy_ptr:
.indirect_symbol _OPENSSL_ia32cap_P .indirect_symbol _OPENSSL_ia32cap_P

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -82,8 +82,8 @@ L$enc_loop:
movl 0(%r14,%rdi,8),%edi movl 0(%r14,%rdi,8),%edi
movl 0(%r14,%rbp,8),%ebp movl 0(%r14,%rbp,8),%ebp
andl $65280,%edi andl $0x0000ff00,%edi
andl $65280,%ebp andl $0x0000ff00,%ebp
xorl %edi,%r10d xorl %edi,%r10d
xorl %ebp,%r11d xorl %ebp,%r11d
@ -95,8 +95,8 @@ L$enc_loop:
movl 0(%r14,%rsi,8),%esi movl 0(%r14,%rsi,8),%esi
movl 0(%r14,%rdi,8),%edi movl 0(%r14,%rdi,8),%edi
andl $65280,%esi andl $0x0000ff00,%esi
andl $65280,%edi andl $0x0000ff00,%edi
shrl $16,%ebx shrl $16,%ebx
xorl %esi,%r12d xorl %esi,%r12d
xorl %edi,%r8d xorl %edi,%r8d
@ -109,9 +109,9 @@ L$enc_loop:
movl 0(%r14,%rdi,8),%edi movl 0(%r14,%rdi,8),%edi
movl 0(%r14,%rbp,8),%ebp movl 0(%r14,%rbp,8),%ebp
andl $16711680,%esi andl $0x00ff0000,%esi
andl $16711680,%edi andl $0x00ff0000,%edi
andl $16711680,%ebp andl $0x00ff0000,%ebp
xorl %esi,%r10d xorl %esi,%r10d
xorl %edi,%r11d xorl %edi,%r11d
@ -124,9 +124,9 @@ L$enc_loop:
movl 2(%r14,%rdi,8),%edi movl 2(%r14,%rdi,8),%edi
movl 2(%r14,%rbp,8),%ebp movl 2(%r14,%rbp,8),%ebp
andl $16711680,%esi andl $0x00ff0000,%esi
andl $4278190080,%edi andl $0xff000000,%edi
andl $4278190080,%ebp andl $0xff000000,%ebp
xorl %esi,%r8d xorl %esi,%r8d
xorl %edi,%r10d xorl %edi,%r10d
@ -139,8 +139,8 @@ L$enc_loop:
movl 2(%r14,%rdi,8),%edi movl 2(%r14,%rdi,8),%edi
movl 16+0(%r15),%eax movl 16+0(%r15),%eax
andl $4278190080,%esi andl $0xff000000,%esi
andl $4278190080,%edi andl $0xff000000,%edi
xorl %esi,%r12d xorl %esi,%r12d
xorl %edi,%r8d xorl %edi,%r8d
@ -242,8 +242,8 @@ L$enc_loop_compact:
xorl %r8d,%edx xorl %r8d,%edx
cmpq 16(%rsp),%r15 cmpq 16(%rsp),%r15
je L$enc_compact_done je L$enc_compact_done
movl $2155905152,%r10d movl $0x80808080,%r10d
movl $2155905152,%r11d movl $0x80808080,%r11d
andl %eax,%r10d andl %eax,%r10d
andl %ebx,%r11d andl %ebx,%r11d
movl %r10d,%esi movl %r10d,%esi
@ -254,10 +254,10 @@ L$enc_loop_compact:
leal (%rbx,%rbx,1),%r9d leal (%rbx,%rbx,1),%r9d
subl %r10d,%esi subl %r10d,%esi
subl %r11d,%edi subl %r11d,%edi
andl $4278124286,%r8d andl $0xfefefefe,%r8d
andl $4278124286,%r9d andl $0xfefefefe,%r9d
andl $454761243,%esi andl $0x1b1b1b1b,%esi
andl $454761243,%edi andl $0x1b1b1b1b,%edi
movl %eax,%r10d movl %eax,%r10d
movl %ebx,%r11d movl %ebx,%r11d
xorl %esi,%r8d xorl %esi,%r8d
@ -265,9 +265,9 @@ L$enc_loop_compact:
xorl %r8d,%eax xorl %r8d,%eax
xorl %r9d,%ebx xorl %r9d,%ebx
movl $2155905152,%r12d movl $0x80808080,%r12d
roll $24,%eax roll $24,%eax
movl $2155905152,%ebp movl $0x80808080,%ebp
roll $24,%ebx roll $24,%ebx
andl %ecx,%r12d andl %ecx,%r12d
andl %edx,%ebp andl %edx,%ebp
@ -290,10 +290,10 @@ L$enc_loop_compact:
xorl %r10d,%eax xorl %r10d,%eax
xorl %r11d,%ebx xorl %r11d,%ebx
andl $4278124286,%r8d andl $0xfefefefe,%r8d
andl $4278124286,%r9d andl $0xfefefefe,%r9d
andl $454761243,%esi andl $0x1b1b1b1b,%esi
andl $454761243,%edi andl $0x1b1b1b1b,%edi
movl %ecx,%r12d movl %ecx,%r12d
movl %edx,%ebp movl %edx,%ebp
xorl %esi,%r8d xorl %esi,%r8d
@ -345,7 +345,7 @@ _asm_AES_encrypt:
andq $-64,%rsp andq $-64,%rsp
subq %rsp,%rcx subq %rsp,%rcx
negq %rcx negq %rcx
andq $960,%rcx andq $0x3c0,%rcx
subq %rcx,%rsp subq %rcx,%rsp
subq $32,%rsp subq $32,%rsp
@ -370,7 +370,7 @@ L$enc_prologue:
leaq L$AES_Te+2048(%rip),%r14 leaq L$AES_Te+2048(%rip),%r14
leaq 768(%rsp),%rbp leaq 768(%rsp),%rbp
subq %r14,%rbp subq %r14,%rbp
andq $768,%rbp andq $0x300,%rbp
leaq (%r14,%rbp,1),%r14 leaq (%r14,%rbp,1),%r14
call _x86_64_AES_encrypt_compact call _x86_64_AES_encrypt_compact
@ -791,7 +791,7 @@ _asm_AES_decrypt:
andq $-64,%rsp andq $-64,%rsp
subq %rsp,%rcx subq %rsp,%rcx
negq %rcx negq %rcx
andq $960,%rcx andq $0x3c0,%rcx
subq %rcx,%rsp subq %rcx,%rsp
subq $32,%rsp subq $32,%rsp
@ -816,7 +816,7 @@ L$dec_prologue:
leaq L$AES_Td+2048(%rip),%r14 leaq L$AES_Td+2048(%rip),%r14
leaq 768(%rsp),%rbp leaq 768(%rsp),%rbp
subq %r14,%rbp subq %r14,%rbp
andq $768,%rbp andq $0x300,%rbp
leaq (%r14,%rbp,1),%r14 leaq (%r14,%rbp,1),%r14
shrq $3,%rbp shrq $3,%rbp
addq %rbp,%r14 addq %rbp,%r14
@ -1333,9 +1333,9 @@ L$cbc_picked_te:
movq %r14,%r10 movq %r14,%r10
leaq 2304(%r14),%r11 leaq 2304(%r14),%r11
movq %r15,%r12 movq %r15,%r12
andq $4095,%r10 andq $0xFFF,%r10
andq $4095,%r11 andq $0xFFF,%r11
andq $4095,%r12 andq $0xFFF,%r12
cmpq %r11,%r12 cmpq %r11,%r12
jb L$cbc_te_break_out jb L$cbc_te_break_out
@ -1344,7 +1344,7 @@ L$cbc_picked_te:
jmp L$cbc_te_ok jmp L$cbc_te_ok
L$cbc_te_break_out: L$cbc_te_break_out:
subq %r10,%r12 subq %r10,%r12
andq $4095,%r12 andq $0xFFF,%r12
addq $320,%r12 addq $320,%r12
subq %r12,%r15 subq %r12,%r15
.p2align 2 .p2align 2
@ -1370,7 +1370,7 @@ L$cbc_fast_body:
movq %r15,%r10 movq %r15,%r10
subq %r14,%r10 subq %r14,%r10
andq $4095,%r10 andq $0xfff,%r10
cmpq $2304,%r10 cmpq $2304,%r10
jb L$cbc_do_ecopy jb L$cbc_do_ecopy
cmpq $4096-248,%r10 cmpq $4096-248,%r10
@ -1557,7 +1557,7 @@ L$cbc_slow_prologue:
leaq -88-63(%rcx),%r10 leaq -88-63(%rcx),%r10
subq %rbp,%r10 subq %rbp,%r10
negq %r10 negq %r10
andq $960,%r10 andq $0x3c0,%r10
subq %r10,%rbp subq %r10,%rbp
xchgq %rsp,%rbp xchgq %rsp,%rbp
@ -1586,7 +1586,7 @@ L$cbc_slow_body:
leaq 2048(%r14),%r14 leaq 2048(%r14),%r14
leaq 768-8(%rsp),%rax leaq 768-8(%rsp),%rax
subq %r14,%rax subq %r14,%rax
andq $768,%rax andq $0x300,%rax
leaq (%r14,%rax,1),%r14 leaq (%r14,%rax,1),%r14
cmpq $0,%rbx cmpq $0,%rbx

View File

@ -507,7 +507,7 @@ _aesni_ecb_encrypt:
testl %r8d,%r8d testl %r8d,%r8d
jz L$ecb_decrypt jz L$ecb_decrypt
cmpq $128,%rdx cmpq $0x80,%rdx
jb L$ecb_enc_tail jb L$ecb_enc_tail
movdqu (%rdi),%xmm2 movdqu (%rdi),%xmm2
@ -519,7 +519,7 @@ _aesni_ecb_encrypt:
movdqu 96(%rdi),%xmm8 movdqu 96(%rdi),%xmm8
movdqu 112(%rdi),%xmm9 movdqu 112(%rdi),%xmm9
leaq 128(%rdi),%rdi leaq 128(%rdi),%rdi
subq $128,%rdx subq $0x80,%rdx
jmp L$ecb_enc_loop8_enter jmp L$ecb_enc_loop8_enter
.p2align 4 .p2align 4
L$ecb_enc_loop8: L$ecb_enc_loop8:
@ -547,7 +547,7 @@ L$ecb_enc_loop8_enter:
call _aesni_encrypt8 call _aesni_encrypt8
subq $128,%rdx subq $0x80,%rdx
jnc L$ecb_enc_loop8 jnc L$ecb_enc_loop8
movups %xmm2,(%rsi) movups %xmm2,(%rsi)
@ -561,22 +561,22 @@ L$ecb_enc_loop8_enter:
movups %xmm8,96(%rsi) movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi) movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi leaq 128(%rsi),%rsi
addq $128,%rdx addq $0x80,%rdx
jz L$ecb_ret jz L$ecb_ret
L$ecb_enc_tail: L$ecb_enc_tail:
movups (%rdi),%xmm2 movups (%rdi),%xmm2
cmpq $32,%rdx cmpq $0x20,%rdx
jb L$ecb_enc_one jb L$ecb_enc_one
movups 16(%rdi),%xmm3 movups 16(%rdi),%xmm3
je L$ecb_enc_two je L$ecb_enc_two
movups 32(%rdi),%xmm4 movups 32(%rdi),%xmm4
cmpq $64,%rdx cmpq $0x40,%rdx
jb L$ecb_enc_three jb L$ecb_enc_three
movups 48(%rdi),%xmm5 movups 48(%rdi),%xmm5
je L$ecb_enc_four je L$ecb_enc_four
movups 64(%rdi),%xmm6 movups 64(%rdi),%xmm6
cmpq $96,%rdx cmpq $0x60,%rdx
jb L$ecb_enc_five jb L$ecb_enc_five
movups 80(%rdi),%xmm7 movups 80(%rdi),%xmm7
je L$ecb_enc_six je L$ecb_enc_six
@ -650,7 +650,7 @@ L$ecb_enc_six:
.p2align 4 .p2align 4
L$ecb_decrypt: L$ecb_decrypt:
cmpq $128,%rdx cmpq $0x80,%rdx
jb L$ecb_dec_tail jb L$ecb_dec_tail
movdqu (%rdi),%xmm2 movdqu (%rdi),%xmm2
@ -662,7 +662,7 @@ L$ecb_decrypt:
movdqu 96(%rdi),%xmm8 movdqu 96(%rdi),%xmm8
movdqu 112(%rdi),%xmm9 movdqu 112(%rdi),%xmm9
leaq 128(%rdi),%rdi leaq 128(%rdi),%rdi
subq $128,%rdx subq $0x80,%rdx
jmp L$ecb_dec_loop8_enter jmp L$ecb_dec_loop8_enter
.p2align 4 .p2align 4
L$ecb_dec_loop8: L$ecb_dec_loop8:
@ -691,7 +691,7 @@ L$ecb_dec_loop8_enter:
call _aesni_decrypt8 call _aesni_decrypt8
movups (%r11),%xmm0 movups (%r11),%xmm0
subq $128,%rdx subq $0x80,%rdx
jnc L$ecb_dec_loop8 jnc L$ecb_dec_loop8
movups %xmm2,(%rsi) movups %xmm2,(%rsi)
@ -713,22 +713,22 @@ L$ecb_dec_loop8_enter:
movups %xmm9,112(%rsi) movups %xmm9,112(%rsi)
pxor %xmm9,%xmm9 pxor %xmm9,%xmm9
leaq 128(%rsi),%rsi leaq 128(%rsi),%rsi
addq $128,%rdx addq $0x80,%rdx
jz L$ecb_ret jz L$ecb_ret
L$ecb_dec_tail: L$ecb_dec_tail:
movups (%rdi),%xmm2 movups (%rdi),%xmm2
cmpq $32,%rdx cmpq $0x20,%rdx
jb L$ecb_dec_one jb L$ecb_dec_one
movups 16(%rdi),%xmm3 movups 16(%rdi),%xmm3
je L$ecb_dec_two je L$ecb_dec_two
movups 32(%rdi),%xmm4 movups 32(%rdi),%xmm4
cmpq $64,%rdx cmpq $0x40,%rdx
jb L$ecb_dec_three jb L$ecb_dec_three
movups 48(%rdi),%xmm5 movups 48(%rdi),%xmm5
je L$ecb_dec_four je L$ecb_dec_four
movups 64(%rdi),%xmm6 movups 64(%rdi),%xmm6
cmpq $96,%rdx cmpq $0x60,%rdx
jb L$ecb_dec_five jb L$ecb_dec_five
movups 80(%rdi),%xmm7 movups 80(%rdi),%xmm7
je L$ecb_dec_six je L$ecb_dec_six
@ -1606,7 +1606,7 @@ L$oop_enc1_8:
movdqa L$xts_magic(%rip),%xmm8 movdqa L$xts_magic(%rip),%xmm8
movdqa %xmm2,%xmm15 movdqa %xmm2,%xmm15
pshufd $95,%xmm2,%xmm9 pshufd $0x5f,%xmm2,%xmm9
pxor %xmm0,%xmm1 pxor %xmm0,%xmm1
movdqa %xmm9,%xmm14 movdqa %xmm9,%xmm14
paddd %xmm9,%xmm9 paddd %xmm9,%xmm9
@ -1705,7 +1705,7 @@ L$xts_enc_grandloop:
.byte 102,15,56,220,248 .byte 102,15,56,220,248
movups 64(%r11),%xmm0 movups 64(%r11),%xmm0
movdqa %xmm8,80(%rsp) movdqa %xmm8,80(%rsp)
pshufd $95,%xmm15,%xmm9 pshufd $0x5f,%xmm15,%xmm9
jmp L$xts_enc_loop6 jmp L$xts_enc_loop6
.p2align 5 .p2align 5
L$xts_enc_loop6: L$xts_enc_loop6:
@ -1844,13 +1844,13 @@ L$xts_enc_short:
jz L$xts_enc_done jz L$xts_enc_done
pxor %xmm0,%xmm11 pxor %xmm0,%xmm11
cmpq $32,%rdx cmpq $0x20,%rdx
jb L$xts_enc_one jb L$xts_enc_one
pxor %xmm0,%xmm12 pxor %xmm0,%xmm12
je L$xts_enc_two je L$xts_enc_two
pxor %xmm0,%xmm13 pxor %xmm0,%xmm13
cmpq $64,%rdx cmpq $0x40,%rdx
jb L$xts_enc_three jb L$xts_enc_three
pxor %xmm0,%xmm14 pxor %xmm0,%xmm14
je L$xts_enc_four je L$xts_enc_four
@ -2078,7 +2078,7 @@ L$oop_enc1_11:
movdqa L$xts_magic(%rip),%xmm8 movdqa L$xts_magic(%rip),%xmm8
movdqa %xmm2,%xmm15 movdqa %xmm2,%xmm15
pshufd $95,%xmm2,%xmm9 pshufd $0x5f,%xmm2,%xmm9
pxor %xmm0,%xmm1 pxor %xmm0,%xmm1
movdqa %xmm9,%xmm14 movdqa %xmm9,%xmm14
paddd %xmm9,%xmm9 paddd %xmm9,%xmm9
@ -2177,7 +2177,7 @@ L$xts_dec_grandloop:
.byte 102,15,56,222,248 .byte 102,15,56,222,248
movups 64(%r11),%xmm0 movups 64(%r11),%xmm0
movdqa %xmm8,80(%rsp) movdqa %xmm8,80(%rsp)
pshufd $95,%xmm15,%xmm9 pshufd $0x5f,%xmm15,%xmm9
jmp L$xts_dec_loop6 jmp L$xts_dec_loop6
.p2align 5 .p2align 5
L$xts_dec_loop6: L$xts_dec_loop6:
@ -2317,13 +2317,13 @@ L$xts_dec_short:
jz L$xts_dec_done jz L$xts_dec_done
pxor %xmm0,%xmm12 pxor %xmm0,%xmm12
cmpq $32,%rdx cmpq $0x20,%rdx
jb L$xts_dec_one jb L$xts_dec_one
pxor %xmm0,%xmm13 pxor %xmm0,%xmm13
je L$xts_dec_two je L$xts_dec_two
pxor %xmm0,%xmm14 pxor %xmm0,%xmm14
cmpq $64,%rdx cmpq $0x40,%rdx
jb L$xts_dec_three jb L$xts_dec_three
je L$xts_dec_four je L$xts_dec_four
@ -2354,7 +2354,7 @@ L$xts_dec_short:
pcmpgtd %xmm15,%xmm14 pcmpgtd %xmm15,%xmm14
movdqu %xmm6,64(%rsi) movdqu %xmm6,64(%rsi)
leaq 80(%rsi),%rsi leaq 80(%rsi),%rsi
pshufd $19,%xmm14,%xmm11 pshufd $0x13,%xmm14,%xmm11
andq $15,%r9 andq $15,%r9
jz L$xts_dec_ret jz L$xts_dec_ret
@ -2644,7 +2644,7 @@ L$cbc_decrypt_bulk:
leaq -8(%rax),%rbp leaq -8(%rax),%rbp
movups (%r8),%xmm10 movups (%r8),%xmm10
movl %r10d,%eax movl %r10d,%eax
cmpq $80,%rdx cmpq $0x50,%rdx
jbe L$cbc_dec_tail jbe L$cbc_dec_tail
movups (%rcx),%xmm0 movups (%rcx),%xmm0
@ -2660,14 +2660,14 @@ L$cbc_decrypt_bulk:
movdqu 80(%rdi),%xmm7 movdqu 80(%rdi),%xmm7
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movl _OPENSSL_ia32cap_P+4(%rip),%r9d movl _OPENSSL_ia32cap_P+4(%rip),%r9d
cmpq $112,%rdx cmpq $0x70,%rdx
jbe L$cbc_dec_six_or_seven jbe L$cbc_dec_six_or_seven
andl $71303168,%r9d andl $71303168,%r9d
subq $80,%rdx subq $0x50,%rdx
cmpl $4194304,%r9d cmpl $4194304,%r9d
je L$cbc_dec_loop6_enter je L$cbc_dec_loop6_enter
subq $32,%rdx subq $0x20,%rdx
leaq 112(%rcx),%rcx leaq 112(%rcx),%rcx
jmp L$cbc_dec_loop8_enter jmp L$cbc_dec_loop8_enter
.p2align 4 .p2align 4
@ -2682,7 +2682,7 @@ L$cbc_dec_loop8_enter:
movups 16-112(%rcx),%xmm1 movups 16-112(%rcx),%xmm1
pxor %xmm0,%xmm4 pxor %xmm0,%xmm4
xorq %r11,%r11 xorq %r11,%r11
cmpq $112,%rdx cmpq $0x70,%rdx
pxor %xmm0,%xmm5 pxor %xmm0,%xmm5
pxor %xmm0,%xmm6 pxor %xmm0,%xmm6
pxor %xmm0,%xmm7 pxor %xmm0,%xmm7
@ -2867,21 +2867,21 @@ L$cbc_dec_done:
movups %xmm8,96(%rsi) movups %xmm8,96(%rsi)
leaq 112(%rsi),%rsi leaq 112(%rsi),%rsi
subq $128,%rdx subq $0x80,%rdx
ja L$cbc_dec_loop8 ja L$cbc_dec_loop8
movaps %xmm9,%xmm2 movaps %xmm9,%xmm2
leaq -112(%rcx),%rcx leaq -112(%rcx),%rcx
addq $112,%rdx addq $0x70,%rdx
jle L$cbc_dec_clear_tail_collected jle L$cbc_dec_clear_tail_collected
movups %xmm9,(%rsi) movups %xmm9,(%rsi)
leaq 16(%rsi),%rsi leaq 16(%rsi),%rsi
cmpq $80,%rdx cmpq $0x50,%rdx
jbe L$cbc_dec_tail jbe L$cbc_dec_tail
movaps %xmm11,%xmm2 movaps %xmm11,%xmm2
L$cbc_dec_six_or_seven: L$cbc_dec_six_or_seven:
cmpq $96,%rdx cmpq $0x60,%rdx
ja L$cbc_dec_seven ja L$cbc_dec_seven
movaps %xmm7,%xmm8 movaps %xmm7,%xmm8
@ -2974,33 +2974,33 @@ L$cbc_dec_loop6_enter:
movl %r10d,%eax movl %r10d,%eax
movdqu %xmm6,64(%rsi) movdqu %xmm6,64(%rsi)
leaq 80(%rsi),%rsi leaq 80(%rsi),%rsi
subq $96,%rdx subq $0x60,%rdx
ja L$cbc_dec_loop6 ja L$cbc_dec_loop6
movdqa %xmm7,%xmm2 movdqa %xmm7,%xmm2
addq $80,%rdx addq $0x50,%rdx
jle L$cbc_dec_clear_tail_collected jle L$cbc_dec_clear_tail_collected
movups %xmm7,(%rsi) movups %xmm7,(%rsi)
leaq 16(%rsi),%rsi leaq 16(%rsi),%rsi
L$cbc_dec_tail: L$cbc_dec_tail:
movups (%rdi),%xmm2 movups (%rdi),%xmm2
subq $16,%rdx subq $0x10,%rdx
jbe L$cbc_dec_one jbe L$cbc_dec_one
movups 16(%rdi),%xmm3 movups 16(%rdi),%xmm3
movaps %xmm2,%xmm11 movaps %xmm2,%xmm11
subq $16,%rdx subq $0x10,%rdx
jbe L$cbc_dec_two jbe L$cbc_dec_two
movups 32(%rdi),%xmm4 movups 32(%rdi),%xmm4
movaps %xmm3,%xmm12 movaps %xmm3,%xmm12
subq $16,%rdx subq $0x10,%rdx
jbe L$cbc_dec_three jbe L$cbc_dec_three
movups 48(%rdi),%xmm5 movups 48(%rdi),%xmm5
movaps %xmm4,%xmm13 movaps %xmm4,%xmm13
subq $16,%rdx subq $0x10,%rdx
jbe L$cbc_dec_four jbe L$cbc_dec_four
movups 64(%rdi),%xmm6 movups 64(%rdi),%xmm6
@ -3025,7 +3025,7 @@ L$cbc_dec_tail:
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
pxor %xmm6,%xmm6 pxor %xmm6,%xmm6
pxor %xmm7,%xmm7 pxor %xmm7,%xmm7
subq $16,%rdx subq $0x10,%rdx
jmp L$cbc_dec_tail_collected jmp L$cbc_dec_tail_collected
.p2align 4 .p2align 4
@ -3344,7 +3344,7 @@ L$oop_key192:
pslldq $4,%xmm0 pslldq $4,%xmm0
pxor %xmm3,%xmm0 pxor %xmm3,%xmm0
pshufd $255,%xmm0,%xmm3 pshufd $0xff,%xmm0,%xmm3
pxor %xmm1,%xmm3 pxor %xmm1,%xmm3
pslldq $4,%xmm1 pslldq $4,%xmm1
pxor %xmm1,%xmm3 pxor %xmm1,%xmm3
@ -3431,7 +3431,7 @@ L$oop_key256:
decl %r10d decl %r10d
jz L$done_key256 jz L$done_key256
pshufd $255,%xmm0,%xmm2 pshufd $0xff,%xmm0,%xmm2
pxor %xmm3,%xmm3 pxor %xmm3,%xmm3
.byte 102,15,56,221,211 .byte 102,15,56,221,211

View File

@ -325,45 +325,45 @@ L$enc_sbox:
pxor %xmm2,%xmm5 pxor %xmm2,%xmm5
decl %r10d decl %r10d
jl L$enc_done jl L$enc_done
pshufd $147,%xmm15,%xmm7 pshufd $0x93,%xmm15,%xmm7
pshufd $147,%xmm0,%xmm8 pshufd $0x93,%xmm0,%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $147,%xmm3,%xmm9 pshufd $0x93,%xmm3,%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $147,%xmm5,%xmm10 pshufd $0x93,%xmm5,%xmm10
pxor %xmm9,%xmm3 pxor %xmm9,%xmm3
pshufd $147,%xmm2,%xmm11 pshufd $0x93,%xmm2,%xmm11
pxor %xmm10,%xmm5 pxor %xmm10,%xmm5
pshufd $147,%xmm6,%xmm12 pshufd $0x93,%xmm6,%xmm12
pxor %xmm11,%xmm2 pxor %xmm11,%xmm2
pshufd $147,%xmm1,%xmm13 pshufd $0x93,%xmm1,%xmm13
pxor %xmm12,%xmm6 pxor %xmm12,%xmm6
pshufd $147,%xmm4,%xmm14 pshufd $0x93,%xmm4,%xmm14
pxor %xmm13,%xmm1 pxor %xmm13,%xmm1
pxor %xmm14,%xmm4 pxor %xmm14,%xmm4
pxor %xmm15,%xmm8 pxor %xmm15,%xmm8
pxor %xmm4,%xmm7 pxor %xmm4,%xmm7
pxor %xmm4,%xmm8 pxor %xmm4,%xmm8
pshufd $78,%xmm15,%xmm15 pshufd $0x4E,%xmm15,%xmm15
pxor %xmm0,%xmm9 pxor %xmm0,%xmm9
pshufd $78,%xmm0,%xmm0 pshufd $0x4E,%xmm0,%xmm0
pxor %xmm2,%xmm12 pxor %xmm2,%xmm12
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pxor %xmm6,%xmm13 pxor %xmm6,%xmm13
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pxor %xmm5,%xmm11 pxor %xmm5,%xmm11
pshufd $78,%xmm2,%xmm7 pshufd $0x4E,%xmm2,%xmm7
pxor %xmm1,%xmm14 pxor %xmm1,%xmm14
pshufd $78,%xmm6,%xmm8 pshufd $0x4E,%xmm6,%xmm8
pxor %xmm3,%xmm10 pxor %xmm3,%xmm10
pshufd $78,%xmm5,%xmm2 pshufd $0x4E,%xmm5,%xmm2
pxor %xmm4,%xmm10 pxor %xmm4,%xmm10
pshufd $78,%xmm4,%xmm6 pshufd $0x4E,%xmm4,%xmm6
pxor %xmm4,%xmm11 pxor %xmm4,%xmm11
pshufd $78,%xmm1,%xmm5 pshufd $0x4E,%xmm1,%xmm5
pxor %xmm11,%xmm7 pxor %xmm11,%xmm7
pshufd $78,%xmm3,%xmm1 pshufd $0x4E,%xmm3,%xmm1
pxor %xmm12,%xmm8 pxor %xmm12,%xmm8
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pxor %xmm14,%xmm6 pxor %xmm14,%xmm6
@ -797,24 +797,24 @@ L$dec_sbox:
decl %r10d decl %r10d
jl L$dec_done jl L$dec_done
pshufd $78,%xmm15,%xmm7 pshufd $0x4E,%xmm15,%xmm7
pshufd $78,%xmm2,%xmm13 pshufd $0x4E,%xmm2,%xmm13
pxor %xmm15,%xmm7 pxor %xmm15,%xmm7
pshufd $78,%xmm4,%xmm14 pshufd $0x4E,%xmm4,%xmm14
pxor %xmm2,%xmm13 pxor %xmm2,%xmm13
pshufd $78,%xmm0,%xmm8 pshufd $0x4E,%xmm0,%xmm8
pxor %xmm4,%xmm14 pxor %xmm4,%xmm14
pshufd $78,%xmm5,%xmm9 pshufd $0x4E,%xmm5,%xmm9
pxor %xmm0,%xmm8 pxor %xmm0,%xmm8
pshufd $78,%xmm3,%xmm10 pshufd $0x4E,%xmm3,%xmm10
pxor %xmm5,%xmm9 pxor %xmm5,%xmm9
pxor %xmm13,%xmm15 pxor %xmm13,%xmm15
pxor %xmm13,%xmm0 pxor %xmm13,%xmm0
pshufd $78,%xmm1,%xmm11 pshufd $0x4E,%xmm1,%xmm11
pxor %xmm3,%xmm10 pxor %xmm3,%xmm10
pxor %xmm7,%xmm5 pxor %xmm7,%xmm5
pxor %xmm8,%xmm3 pxor %xmm8,%xmm3
pshufd $78,%xmm6,%xmm12 pshufd $0x4E,%xmm6,%xmm12
pxor %xmm1,%xmm11 pxor %xmm1,%xmm11
pxor %xmm14,%xmm0 pxor %xmm14,%xmm0
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
@ -828,45 +828,45 @@ L$dec_sbox:
pxor %xmm14,%xmm1 pxor %xmm14,%xmm1
pxor %xmm14,%xmm6 pxor %xmm14,%xmm6
pxor %xmm12,%xmm4 pxor %xmm12,%xmm4
pshufd $147,%xmm15,%xmm7 pshufd $0x93,%xmm15,%xmm7
pshufd $147,%xmm0,%xmm8 pshufd $0x93,%xmm0,%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $147,%xmm5,%xmm9 pshufd $0x93,%xmm5,%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $147,%xmm3,%xmm10 pshufd $0x93,%xmm3,%xmm10
pxor %xmm9,%xmm5 pxor %xmm9,%xmm5
pshufd $147,%xmm1,%xmm11 pshufd $0x93,%xmm1,%xmm11
pxor %xmm10,%xmm3 pxor %xmm10,%xmm3
pshufd $147,%xmm6,%xmm12 pshufd $0x93,%xmm6,%xmm12
pxor %xmm11,%xmm1 pxor %xmm11,%xmm1
pshufd $147,%xmm2,%xmm13 pshufd $0x93,%xmm2,%xmm13
pxor %xmm12,%xmm6 pxor %xmm12,%xmm6
pshufd $147,%xmm4,%xmm14 pshufd $0x93,%xmm4,%xmm14
pxor %xmm13,%xmm2 pxor %xmm13,%xmm2
pxor %xmm14,%xmm4 pxor %xmm14,%xmm4
pxor %xmm15,%xmm8 pxor %xmm15,%xmm8
pxor %xmm4,%xmm7 pxor %xmm4,%xmm7
pxor %xmm4,%xmm8 pxor %xmm4,%xmm8
pshufd $78,%xmm15,%xmm15 pshufd $0x4E,%xmm15,%xmm15
pxor %xmm0,%xmm9 pxor %xmm0,%xmm9
pshufd $78,%xmm0,%xmm0 pshufd $0x4E,%xmm0,%xmm0
pxor %xmm1,%xmm12 pxor %xmm1,%xmm12
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pxor %xmm6,%xmm13 pxor %xmm6,%xmm13
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pxor %xmm3,%xmm11 pxor %xmm3,%xmm11
pshufd $78,%xmm1,%xmm7 pshufd $0x4E,%xmm1,%xmm7
pxor %xmm2,%xmm14 pxor %xmm2,%xmm14
pshufd $78,%xmm6,%xmm8 pshufd $0x4E,%xmm6,%xmm8
pxor %xmm5,%xmm10 pxor %xmm5,%xmm10
pshufd $78,%xmm3,%xmm1 pshufd $0x4E,%xmm3,%xmm1
pxor %xmm4,%xmm10 pxor %xmm4,%xmm10
pshufd $78,%xmm4,%xmm6 pshufd $0x4E,%xmm4,%xmm6
pxor %xmm4,%xmm11 pxor %xmm4,%xmm11
pshufd $78,%xmm2,%xmm3 pshufd $0x4E,%xmm2,%xmm3
pxor %xmm11,%xmm7 pxor %xmm11,%xmm7
pshufd $78,%xmm5,%xmm2 pshufd $0x4E,%xmm5,%xmm2
pxor %xmm12,%xmm8 pxor %xmm12,%xmm8
pxor %xmm1,%xmm10 pxor %xmm1,%xmm10
pxor %xmm14,%xmm6 pxor %xmm14,%xmm6
@ -1556,20 +1556,20 @@ L$xts_enc_prologue:
movdqa %xmm7,(%rax) movdqa %xmm7,(%rax)
andq $-16,%r14 andq $-16,%r14
subq $128,%rsp subq $0x80,%rsp
movdqa 32(%rbp),%xmm6 movdqa 32(%rbp),%xmm6
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa L$xts_magic(%rip),%xmm12 movdqa L$xts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
subq $128,%r14 subq $0x80,%r14
jc L$xts_enc_short jc L$xts_enc_short
jmp L$xts_enc_loop jmp L$xts_enc_loop
.p2align 4 .p2align 4
L$xts_enc_loop: L$xts_enc_loop:
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -1577,7 +1577,7 @@ L$xts_enc_loop:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -1586,7 +1586,7 @@ L$xts_enc_loop:
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -1596,7 +1596,7 @@ L$xts_enc_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 16(%r12),%xmm8 movdqu 16(%r12),%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -1606,7 +1606,7 @@ L$xts_enc_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 32(%r12),%xmm9 movdqu 32(%r12),%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -1616,7 +1616,7 @@ L$xts_enc_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 48(%r12),%xmm10 movdqu 48(%r12),%xmm10
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -1626,7 +1626,7 @@ L$xts_enc_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 64(%r12),%xmm11 movdqu 64(%r12),%xmm11
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -1670,20 +1670,20 @@ L$xts_enc_loop:
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa L$xts_magic(%rip),%xmm12 movdqa L$xts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
paddq %xmm6,%xmm6 paddq %xmm6,%xmm6
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
subq $128,%r14 subq $0x80,%r14
jnc L$xts_enc_loop jnc L$xts_enc_loop
L$xts_enc_short: L$xts_enc_short:
addq $128,%r14 addq $0x80,%r14
jz L$xts_enc_done jz L$xts_enc_done
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -1691,7 +1691,7 @@ L$xts_enc_short:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -1702,7 +1702,7 @@ L$xts_enc_short:
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
cmpq $16,%r14 cmpq $16,%r14
je L$xts_enc_1 je L$xts_enc_1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -1714,7 +1714,7 @@ L$xts_enc_short:
cmpq $32,%r14 cmpq $32,%r14
je L$xts_enc_2 je L$xts_enc_2
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -1726,7 +1726,7 @@ L$xts_enc_short:
cmpq $48,%r14 cmpq $48,%r14
je L$xts_enc_3 je L$xts_enc_3
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -1738,7 +1738,7 @@ L$xts_enc_short:
cmpq $64,%r14 cmpq $64,%r14
je L$xts_enc_4 je L$xts_enc_4
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -1750,7 +1750,7 @@ L$xts_enc_short:
cmpq $80,%r14 cmpq $80,%r14
je L$xts_enc_5 je L$xts_enc_5
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -2016,20 +2016,20 @@ L$xts_dec_prologue:
shlq $4,%rax shlq $4,%rax
subq %rax,%r14 subq %rax,%r14
subq $128,%rsp subq $0x80,%rsp
movdqa 32(%rbp),%xmm6 movdqa 32(%rbp),%xmm6
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa L$xts_magic(%rip),%xmm12 movdqa L$xts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
subq $128,%r14 subq $0x80,%r14
jc L$xts_dec_short jc L$xts_dec_short
jmp L$xts_dec_loop jmp L$xts_dec_loop
.p2align 4 .p2align 4
L$xts_dec_loop: L$xts_dec_loop:
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -2037,7 +2037,7 @@ L$xts_dec_loop:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -2046,7 +2046,7 @@ L$xts_dec_loop:
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -2056,7 +2056,7 @@ L$xts_dec_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 16(%r12),%xmm8 movdqu 16(%r12),%xmm8
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -2066,7 +2066,7 @@ L$xts_dec_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 32(%r12),%xmm9 movdqu 32(%r12),%xmm9
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -2076,7 +2076,7 @@ L$xts_dec_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 48(%r12),%xmm10 movdqu 48(%r12),%xmm10
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -2086,7 +2086,7 @@ L$xts_dec_loop:
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
movdqu 64(%r12),%xmm11 movdqu 64(%r12),%xmm11
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -2130,20 +2130,20 @@ L$xts_dec_loop:
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa L$xts_magic(%rip),%xmm12 movdqa L$xts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
paddq %xmm6,%xmm6 paddq %xmm6,%xmm6
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
subq $128,%r14 subq $0x80,%r14
jnc L$xts_dec_loop jnc L$xts_dec_loop
L$xts_dec_short: L$xts_dec_short:
addq $128,%r14 addq $0x80,%r14
jz L$xts_dec_done jz L$xts_dec_done
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm15 movdqa %xmm6,%xmm15
movdqa %xmm6,0(%rsp) movdqa %xmm6,0(%rsp)
@ -2151,7 +2151,7 @@ L$xts_dec_short:
pand %xmm12,%xmm13 pand %xmm12,%xmm13
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pxor %xmm13,%xmm6 pxor %xmm13,%xmm6
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm0 movdqa %xmm6,%xmm0
movdqa %xmm6,16(%rsp) movdqa %xmm6,16(%rsp)
@ -2162,7 +2162,7 @@ L$xts_dec_short:
movdqu 0(%r12),%xmm7 movdqu 0(%r12),%xmm7
cmpq $16,%r14 cmpq $16,%r14
je L$xts_dec_1 je L$xts_dec_1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm1 movdqa %xmm6,%xmm1
movdqa %xmm6,32(%rsp) movdqa %xmm6,32(%rsp)
@ -2174,7 +2174,7 @@ L$xts_dec_short:
cmpq $32,%r14 cmpq $32,%r14
je L$xts_dec_2 je L$xts_dec_2
pxor %xmm7,%xmm15 pxor %xmm7,%xmm15
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm2 movdqa %xmm6,%xmm2
movdqa %xmm6,48(%rsp) movdqa %xmm6,48(%rsp)
@ -2186,7 +2186,7 @@ L$xts_dec_short:
cmpq $48,%r14 cmpq $48,%r14
je L$xts_dec_3 je L$xts_dec_3
pxor %xmm8,%xmm0 pxor %xmm8,%xmm0
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm3 movdqa %xmm6,%xmm3
movdqa %xmm6,64(%rsp) movdqa %xmm6,64(%rsp)
@ -2198,7 +2198,7 @@ L$xts_dec_short:
cmpq $64,%r14 cmpq $64,%r14
je L$xts_dec_4 je L$xts_dec_4
pxor %xmm9,%xmm1 pxor %xmm9,%xmm1
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm4 movdqa %xmm6,%xmm4
movdqa %xmm6,80(%rsp) movdqa %xmm6,80(%rsp)
@ -2210,7 +2210,7 @@ L$xts_dec_short:
cmpq $80,%r14 cmpq $80,%r14
je L$xts_dec_5 je L$xts_dec_5
pxor %xmm10,%xmm2 pxor %xmm10,%xmm2
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
movdqa %xmm6,96(%rsp) movdqa %xmm6,96(%rsp)
@ -2387,7 +2387,7 @@ L$xts_dec_done:
pxor %xmm14,%xmm14 pxor %xmm14,%xmm14
movdqa L$xts_magic(%rip),%xmm12 movdqa L$xts_magic(%rip),%xmm12
pcmpgtd %xmm6,%xmm14 pcmpgtd %xmm6,%xmm14
pshufd $19,%xmm14,%xmm13 pshufd $0x13,%xmm14,%xmm13
movdqa %xmm6,%xmm5 movdqa %xmm6,%xmm5
paddq %xmm6,%xmm6 paddq %xmm6,%xmm6
pand %xmm12,%xmm13 pand %xmm12,%xmm13

View File

@ -61,7 +61,7 @@ L$enc_loop:
addq $16,%r11 addq $16,%r11
pxor %xmm0,%xmm3 pxor %xmm0,%xmm3
.byte 102,15,56,0,193 .byte 102,15,56,0,193
andq $48,%r11 andq $0x30,%r11
subq $1,%rax subq $1,%rax
pxor %xmm3,%xmm0 pxor %xmm3,%xmm0
@ -121,10 +121,10 @@ _vpaes_decrypt_core:
pand %xmm9,%xmm0 pand %xmm9,%xmm0
.byte 102,15,56,0,208 .byte 102,15,56,0,208
movdqa L$k_dipt+16(%rip),%xmm0 movdqa L$k_dipt+16(%rip),%xmm0
xorq $48,%r11 xorq $0x30,%r11
leaq L$k_dsbd(%rip),%r10 leaq L$k_dsbd(%rip),%r10
.byte 102,15,56,0,193 .byte 102,15,56,0,193
andq $48,%r11 andq $0x30,%r11
pxor %xmm5,%xmm2 pxor %xmm5,%xmm2
movdqa L$k_mc_forward+48(%rip),%xmm5 movdqa L$k_mc_forward+48(%rip),%xmm5
pxor %xmm2,%xmm0 pxor %xmm2,%xmm0
@ -243,7 +243,7 @@ L$schedule_am_decrypting:
movdqa (%r8,%r10,1),%xmm1 movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217 .byte 102,15,56,0,217
movdqu %xmm3,(%rdx) movdqu %xmm3,(%rdx)
xorq $48,%r8 xorq $0x30,%r8
L$schedule_go: L$schedule_go:
cmpl $192,%esi cmpl $192,%esi
@ -333,7 +333,7 @@ L$oop_schedule_256:
call _vpaes_schedule_mangle call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0 pshufd $0xFF,%xmm0,%xmm0
movdqa %xmm7,%xmm5 movdqa %xmm7,%xmm5
movdqa %xmm6,%xmm7 movdqa %xmm6,%xmm7
call _vpaes_schedule_low_round call _vpaes_schedule_low_round
@ -400,8 +400,8 @@ L$schedule_mangle_last_dec:
.p2align 4 .p2align 4
_vpaes_schedule_192_smear: _vpaes_schedule_192_smear:
pshufd $128,%xmm6,%xmm1 pshufd $0x80,%xmm6,%xmm1
pshufd $254,%xmm7,%xmm0 pshufd $0xFE,%xmm7,%xmm0
pxor %xmm1,%xmm6 pxor %xmm1,%xmm6
pxor %xmm1,%xmm1 pxor %xmm1,%xmm1
pxor %xmm0,%xmm6 pxor %xmm0,%xmm6
@ -438,7 +438,7 @@ _vpaes_schedule_round:
pxor %xmm1,%xmm7 pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0 pshufd $0xFF,%xmm0,%xmm0
.byte 102,15,58,15,192,1 .byte 102,15,58,15,192,1
@ -597,7 +597,7 @@ L$schedule_mangle_both:
movdqa (%r8,%r10,1),%xmm1 movdqa (%r8,%r10,1),%xmm1
.byte 102,15,56,0,217 .byte 102,15,56,0,217
addq $-16,%r8 addq $-16,%r8
andq $48,%r8 andq $0x30,%r8
movdqu %xmm3,(%rdx) movdqu %xmm3,(%rdx)
.byte 0xf3,0xc3 .byte 0xf3,0xc3
@ -616,7 +616,7 @@ _vpaes_set_encrypt_key:
movl %eax,240(%rdx) movl %eax,240(%rdx)
movl $0,%ecx movl $0,%ecx
movl $48,%r8d movl $0x30,%r8d
call _vpaes_schedule_core call _vpaes_schedule_core
xorl %eax,%eax xorl %eax,%eax
.byte 0xf3,0xc3 .byte 0xf3,0xc3

View File

@ -465,48 +465,94 @@ _rsaz_512_mul_gather4:
pushq %r14 pushq %r14
pushq %r15 pushq %r15
movl %r9d,%r9d subq $152,%rsp
subq $128+24,%rsp
L$mul_gather4_body: L$mul_gather4_body:
movl 64(%rdx,%r9,4),%eax movd %r9d,%xmm8
.byte 102,72,15,110,199 movdqa L$inc+16(%rip),%xmm1
movl (%rdx,%r9,4),%ebx movdqa L$inc(%rip),%xmm0
.byte 102,72,15,110,201
movq %r8,128(%rsp) pshufd $0,%xmm8,%xmm8
movdqa %xmm1,%xmm7
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm8,%xmm0
movdqa %xmm7,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm8,%xmm1
movdqa %xmm7,%xmm4
paddd %xmm2,%xmm3
pcmpeqd %xmm8,%xmm2
movdqa %xmm7,%xmm5
paddd %xmm3,%xmm4
pcmpeqd %xmm8,%xmm3
movdqa %xmm7,%xmm6
paddd %xmm4,%xmm5
pcmpeqd %xmm8,%xmm4
paddd %xmm5,%xmm6
pcmpeqd %xmm8,%xmm5
paddd %xmm6,%xmm7
pcmpeqd %xmm8,%xmm6
pcmpeqd %xmm8,%xmm7
movdqa 0(%rdx),%xmm8
movdqa 16(%rdx),%xmm9
movdqa 32(%rdx),%xmm10
movdqa 48(%rdx),%xmm11
pand %xmm0,%xmm8
movdqa 64(%rdx),%xmm12
pand %xmm1,%xmm9
movdqa 80(%rdx),%xmm13
pand %xmm2,%xmm10
movdqa 96(%rdx),%xmm14
pand %xmm3,%xmm11
movdqa 112(%rdx),%xmm15
leaq 128(%rdx),%rbp
pand %xmm4,%xmm12
pand %xmm5,%xmm13
pand %xmm6,%xmm14
pand %xmm7,%xmm15
por %xmm10,%xmm8
por %xmm11,%xmm9
por %xmm12,%xmm8
por %xmm13,%xmm9
por %xmm14,%xmm8
por %xmm15,%xmm9
por %xmm9,%xmm8
pshufd $0x4e,%xmm8,%xmm9
por %xmm9,%xmm8
.byte 102,76,15,126,195
movq %r8,128(%rsp)
movq %rdi,128+8(%rsp)
movq %rcx,128+16(%rsp)
shlq $32,%rax
orq %rax,%rbx
movq (%rsi),%rax movq (%rsi),%rax
movq 8(%rsi),%rcx movq 8(%rsi),%rcx
leaq 128(%rdx,%r9,4),%rbp
mulq %rbx mulq %rbx
movq %rax,(%rsp) movq %rax,(%rsp)
movq %rcx,%rax movq %rcx,%rax
movq %rdx,%r8 movq %rdx,%r8
mulq %rbx mulq %rbx
movd (%rbp),%xmm4
addq %rax,%r8 addq %rax,%r8
movq 16(%rsi),%rax movq 16(%rsi),%rax
movq %rdx,%r9 movq %rdx,%r9
adcq $0,%r9 adcq $0,%r9
mulq %rbx mulq %rbx
movd 64(%rbp),%xmm5
addq %rax,%r9 addq %rax,%r9
movq 24(%rsi),%rax movq 24(%rsi),%rax
movq %rdx,%r10 movq %rdx,%r10
adcq $0,%r10 adcq $0,%r10
mulq %rbx mulq %rbx
pslldq $4,%xmm5
addq %rax,%r10 addq %rax,%r10
movq 32(%rsi),%rax movq 32(%rsi),%rax
movq %rdx,%r11 movq %rdx,%r11
adcq $0,%r11 adcq $0,%r11
mulq %rbx mulq %rbx
por %xmm5,%xmm4
addq %rax,%r11 addq %rax,%r11
movq 40(%rsi),%rax movq 40(%rsi),%rax
movq %rdx,%r12 movq %rdx,%r12
@ -519,14 +565,12 @@ L$mul_gather4_body:
adcq $0,%r13 adcq $0,%r13
mulq %rbx mulq %rbx
leaq 128(%rbp),%rbp
addq %rax,%r13 addq %rax,%r13
movq 56(%rsi),%rax movq 56(%rsi),%rax
movq %rdx,%r14 movq %rdx,%r14
adcq $0,%r14 adcq $0,%r14
mulq %rbx mulq %rbx
.byte 102,72,15,126,227
addq %rax,%r14 addq %rax,%r14
movq (%rsi),%rax movq (%rsi),%rax
movq %rdx,%r15 movq %rdx,%r15
@ -538,6 +582,35 @@ L$mul_gather4_body:
.p2align 5 .p2align 5
L$oop_mul_gather: L$oop_mul_gather:
movdqa 0(%rbp),%xmm8
movdqa 16(%rbp),%xmm9
movdqa 32(%rbp),%xmm10
movdqa 48(%rbp),%xmm11
pand %xmm0,%xmm8
movdqa 64(%rbp),%xmm12
pand %xmm1,%xmm9
movdqa 80(%rbp),%xmm13
pand %xmm2,%xmm10
movdqa 96(%rbp),%xmm14
pand %xmm3,%xmm11
movdqa 112(%rbp),%xmm15
leaq 128(%rbp),%rbp
pand %xmm4,%xmm12
pand %xmm5,%xmm13
pand %xmm6,%xmm14
pand %xmm7,%xmm15
por %xmm10,%xmm8
por %xmm11,%xmm9
por %xmm12,%xmm8
por %xmm13,%xmm9
por %xmm14,%xmm8
por %xmm15,%xmm9
por %xmm9,%xmm8
pshufd $0x4e,%xmm8,%xmm9
por %xmm9,%xmm8
.byte 102,76,15,126,195
mulq %rbx mulq %rbx
addq %rax,%r8 addq %rax,%r8
movq 8(%rsi),%rax movq 8(%rsi),%rax
@ -546,7 +619,6 @@ L$oop_mul_gather:
adcq $0,%r8 adcq $0,%r8
mulq %rbx mulq %rbx
movd (%rbp),%xmm4
addq %rax,%r9 addq %rax,%r9
movq 16(%rsi),%rax movq 16(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -555,7 +627,6 @@ L$oop_mul_gather:
adcq $0,%r9 adcq $0,%r9
mulq %rbx mulq %rbx
movd 64(%rbp),%xmm5
addq %rax,%r10 addq %rax,%r10
movq 24(%rsi),%rax movq 24(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -564,7 +635,6 @@ L$oop_mul_gather:
adcq $0,%r10 adcq $0,%r10
mulq %rbx mulq %rbx
pslldq $4,%xmm5
addq %rax,%r11 addq %rax,%r11
movq 32(%rsi),%rax movq 32(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -573,7 +643,6 @@ L$oop_mul_gather:
adcq $0,%r11 adcq $0,%r11
mulq %rbx mulq %rbx
por %xmm5,%xmm4
addq %rax,%r12 addq %rax,%r12
movq 40(%rsi),%rax movq 40(%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -598,7 +667,6 @@ L$oop_mul_gather:
adcq $0,%r14 adcq $0,%r14
mulq %rbx mulq %rbx
.byte 102,72,15,126,227
addq %rax,%r15 addq %rax,%r15
movq (%rsi),%rax movq (%rsi),%rax
adcq $0,%rdx adcq $0,%rdx
@ -606,7 +674,6 @@ L$oop_mul_gather:
movq %rdx,%r15 movq %rdx,%r15
adcq $0,%r15 adcq $0,%r15
leaq 128(%rbp),%rbp
leaq 8(%rdi),%rdi leaq 8(%rdi),%rdi
decl %ecx decl %ecx
@ -621,8 +688,8 @@ L$oop_mul_gather:
movq %r14,48(%rdi) movq %r14,48(%rdi)
movq %r15,56(%rdi) movq %r15,56(%rdi)
.byte 102,72,15,126,199 movq 128+8(%rsp),%rdi
.byte 102,72,15,126,205 movq 128+16(%rsp),%rbp
movq (%rsp),%r8 movq (%rsp),%r8
movq 8(%rsp),%r9 movq 8(%rsp),%r9
@ -672,7 +739,7 @@ _rsaz_512_mul_scatter4:
movl %r9d,%r9d movl %r9d,%r9d
subq $128+24,%rsp subq $128+24,%rsp
L$mul_scatter4_body: L$mul_scatter4_body:
leaq (%r8,%r9,4),%r8 leaq (%r8,%r9,8),%r8
.byte 102,72,15,110,199 .byte 102,72,15,110,199
.byte 102,72,15,110,202 .byte 102,72,15,110,202
.byte 102,73,15,110,208 .byte 102,73,15,110,208
@ -708,30 +775,14 @@ L$mul_scatter4_body:
call __rsaz_512_subtract call __rsaz_512_subtract
movl %r8d,0(%rsi) movq %r8,0(%rsi)
shrq $32,%r8 movq %r9,128(%rsi)
movl %r9d,128(%rsi) movq %r10,256(%rsi)
shrq $32,%r9 movq %r11,384(%rsi)
movl %r10d,256(%rsi) movq %r12,512(%rsi)
shrq $32,%r10 movq %r13,640(%rsi)
movl %r11d,384(%rsi) movq %r14,768(%rsi)
shrq $32,%r11 movq %r15,896(%rsi)
movl %r12d,512(%rsi)
shrq $32,%r12
movl %r13d,640(%rsi)
shrq $32,%r13
movl %r14d,768(%rsi)
shrq $32,%r14
movl %r15d,896(%rsi)
shrq $32,%r15
movl %r8d,64(%rsi)
movl %r9d,192(%rsi)
movl %r10d,320(%rsi)
movl %r11d,448(%rsi)
movl %r12d,576(%rsi)
movl %r13d,704(%rsi)
movl %r14d,832(%rsi)
movl %r15d,960(%rsi)
leaq 128+24+48(%rsp),%rax leaq 128+24+48(%rsp),%rax
movq -48(%rax),%r15 movq -48(%rax),%r15
@ -1086,16 +1137,14 @@ L$oop_mul:
.p2align 4 .p2align 4
_rsaz_512_scatter4: _rsaz_512_scatter4:
leaq (%rdi,%rdx,4),%rdi leaq (%rdi,%rdx,8),%rdi
movl $8,%r9d movl $8,%r9d
jmp L$oop_scatter jmp L$oop_scatter
.p2align 4 .p2align 4
L$oop_scatter: L$oop_scatter:
movq (%rsi),%rax movq (%rsi),%rax
leaq 8(%rsi),%rsi leaq 8(%rsi),%rsi
movl %eax,(%rdi) movq %rax,(%rdi)
shrq $32,%rax
movl %eax,64(%rdi)
leaq 128(%rdi),%rdi leaq 128(%rdi),%rdi
decl %r9d decl %r9d
jnz L$oop_scatter jnz L$oop_scatter
@ -1107,20 +1156,73 @@ L$oop_scatter:
.p2align 4 .p2align 4
_rsaz_512_gather4: _rsaz_512_gather4:
leaq (%rsi,%rdx,4),%rsi movd %edx,%xmm8
movdqa L$inc+16(%rip),%xmm1
movdqa L$inc(%rip),%xmm0
pshufd $0,%xmm8,%xmm8
movdqa %xmm1,%xmm7
movdqa %xmm1,%xmm2
paddd %xmm0,%xmm1
pcmpeqd %xmm8,%xmm0
movdqa %xmm7,%xmm3
paddd %xmm1,%xmm2
pcmpeqd %xmm8,%xmm1
movdqa %xmm7,%xmm4
paddd %xmm2,%xmm3
pcmpeqd %xmm8,%xmm2
movdqa %xmm7,%xmm5
paddd %xmm3,%xmm4
pcmpeqd %xmm8,%xmm3
movdqa %xmm7,%xmm6
paddd %xmm4,%xmm5
pcmpeqd %xmm8,%xmm4
paddd %xmm5,%xmm6
pcmpeqd %xmm8,%xmm5
paddd %xmm6,%xmm7
pcmpeqd %xmm8,%xmm6
pcmpeqd %xmm8,%xmm7
movl $8,%r9d movl $8,%r9d
jmp L$oop_gather jmp L$oop_gather
.p2align 4 .p2align 4
L$oop_gather: L$oop_gather:
movl (%rsi),%eax movdqa 0(%rsi),%xmm8
movl 64(%rsi),%r8d movdqa 16(%rsi),%xmm9
movdqa 32(%rsi),%xmm10
movdqa 48(%rsi),%xmm11
pand %xmm0,%xmm8
movdqa 64(%rsi),%xmm12
pand %xmm1,%xmm9
movdqa 80(%rsi),%xmm13
pand %xmm2,%xmm10
movdqa 96(%rsi),%xmm14
pand %xmm3,%xmm11
movdqa 112(%rsi),%xmm15
leaq 128(%rsi),%rsi leaq 128(%rsi),%rsi
shlq $32,%r8 pand %xmm4,%xmm12
orq %r8,%rax pand %xmm5,%xmm13
movq %rax,(%rdi) pand %xmm6,%xmm14
pand %xmm7,%xmm15
por %xmm10,%xmm8
por %xmm11,%xmm9
por %xmm12,%xmm8
por %xmm13,%xmm9
por %xmm14,%xmm8
por %xmm15,%xmm9
por %xmm9,%xmm8
pshufd $0x4e,%xmm8,%xmm9
por %xmm9,%xmm8
movq %xmm8,(%rdi)
leaq 8(%rdi),%rdi leaq 8(%rdi),%rdi
decl %r9d decl %r9d
jnz L$oop_gather jnz L$oop_gather
.byte 0xf3,0xc3 .byte 0xf3,0xc3
L$SEH_end_rsaz_512_gather4:
.p2align 6
L$inc:
.long 0,0, 1,1
.long 2,2, 2,2
#endif #endif

View File

@ -634,20 +634,20 @@ L$sqr8x_enter:
leaq -64(%rsp,%r9,4),%r11 leaq -64(%rsp,%r9,2),%r11
movq (%r8),%r8 movq (%r8),%r8
subq %rsi,%r11 subq %rsi,%r11
andq $4095,%r11 andq $4095,%r11
cmpq %r11,%r10 cmpq %r11,%r10
jb L$sqr8x_sp_alt jb L$sqr8x_sp_alt
subq %r11,%rsp subq %r11,%rsp
leaq -64(%rsp,%r9,4),%rsp leaq -64(%rsp,%r9,2),%rsp
jmp L$sqr8x_sp_done jmp L$sqr8x_sp_done
.p2align 5 .p2align 5
L$sqr8x_sp_alt: L$sqr8x_sp_alt:
leaq 4096-64(,%r9,4),%r10 leaq 4096-64(,%r9,2),%r10
leaq -64(%rsp,%r9,4),%rsp leaq -64(%rsp,%r9,2),%rsp
subq %r10,%r11 subq %r10,%r11
movq $0,%r10 movq $0,%r10
cmovcq %r10,%r11 cmovcq %r10,%r11
@ -657,58 +657,80 @@ L$sqr8x_sp_done:
movq %r9,%r10 movq %r9,%r10
negq %r9 negq %r9
leaq 64(%rsp,%r9,2),%r11
movq %r8,32(%rsp) movq %r8,32(%rsp)
movq %rax,40(%rsp) movq %rax,40(%rsp)
L$sqr8x_body: L$sqr8x_body:
movq %r9,%rbp .byte 102,72,15,110,209
.byte 102,73,15,110,211
shrq $3+2,%rbp
movl _OPENSSL_ia32cap_P+8(%rip),%eax
jmp L$sqr8x_copy_n
.p2align 5
L$sqr8x_copy_n:
movq 0(%rcx),%xmm0
movq 8(%rcx),%xmm1
movq 16(%rcx),%xmm3
movq 24(%rcx),%xmm4
leaq 32(%rcx),%rcx
movdqa %xmm0,0(%r11)
movdqa %xmm1,16(%r11)
movdqa %xmm3,32(%r11)
movdqa %xmm4,48(%r11)
leaq 64(%r11),%r11
decq %rbp
jnz L$sqr8x_copy_n
pxor %xmm0,%xmm0 pxor %xmm0,%xmm0
.byte 102,72,15,110,207 .byte 102,72,15,110,207
.byte 102,73,15,110,218 .byte 102,73,15,110,218
call _bn_sqr8x_internal call _bn_sqr8x_internal
pxor %xmm0,%xmm0
leaq 48(%rsp),%rax
leaq 64(%rsp,%r9,2),%rdx
shrq $3+2,%r9 leaq (%rdi,%r9,1),%rbx
movq 40(%rsp),%rsi movq %r9,%rcx
jmp L$sqr8x_zero movq %r9,%rdx
.byte 102,72,15,126,207
sarq $3+2,%rcx
jmp L$sqr8x_sub
.p2align 5 .p2align 5
L$sqr8x_zero: L$sqr8x_sub:
movdqa %xmm0,0(%rax) movq 0(%rbx),%r12
movdqa %xmm0,16(%rax) movq 8(%rbx),%r13
movdqa %xmm0,32(%rax) movq 16(%rbx),%r14
movdqa %xmm0,48(%rax) movq 24(%rbx),%r15
leaq 64(%rax),%rax leaq 32(%rbx),%rbx
movdqa %xmm0,0(%rdx) sbbq 0(%rbp),%r12
movdqa %xmm0,16(%rdx) sbbq 8(%rbp),%r13
movdqa %xmm0,32(%rdx) sbbq 16(%rbp),%r14
movdqa %xmm0,48(%rdx) sbbq 24(%rbp),%r15
leaq 64(%rdx),%rdx leaq 32(%rbp),%rbp
decq %r9 movq %r12,0(%rdi)
jnz L$sqr8x_zero movq %r13,8(%rdi)
movq %r14,16(%rdi)
movq %r15,24(%rdi)
leaq 32(%rdi),%rdi
incq %rcx
jnz L$sqr8x_sub
sbbq $0,%rax
leaq (%rbx,%r9,1),%rbx
leaq (%rdi,%r9,1),%rdi
.byte 102,72,15,110,200
pxor %xmm0,%xmm0
pshufd $0,%xmm1,%xmm1
movq 40(%rsp),%rsi
jmp L$sqr8x_cond_copy
.p2align 5
L$sqr8x_cond_copy:
movdqa 0(%rbx),%xmm2
movdqa 16(%rbx),%xmm3
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm4
movdqu 16(%rdi),%xmm5
leaq 32(%rdi),%rdi
movdqa %xmm0,-32(%rbx)
movdqa %xmm0,-16(%rbx)
movdqa %xmm0,-32(%rbx,%rdx,1)
movdqa %xmm0,-16(%rbx,%rdx,1)
pcmpeqd %xmm1,%xmm0
pand %xmm1,%xmm2
pand %xmm1,%xmm3
pand %xmm0,%xmm4
pand %xmm0,%xmm5
pxor %xmm0,%xmm0
por %xmm2,%xmm4
por %xmm3,%xmm5
movdqu %xmm4,-32(%rdi)
movdqu %xmm5,-16(%rdi)
addq $32,%r9
jnz L$sqr8x_cond_copy
movq $1,%rax movq $1,%rax
movq -48(%rsi),%r15 movq -48(%rsi),%r15

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -495,14 +495,14 @@ L$loop:
movl %ecx,%r11d movl %ecx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
movl 0(%rsi),%r10d movl 0(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
xorl %edx,%r11d xorl %edx,%r11d
leal -198630844(%rax,%r10,1),%eax leal -198630844(%rax,%r10,1),%eax
orl %ebx,%r11d orl %ebx,%r11d
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 28(%rsi),%r10d movl 28(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -511,7 +511,7 @@ L$loop:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 56(%rsi),%r10d movl 56(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -520,7 +520,7 @@ L$loop:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 20(%rsi),%r10d movl 20(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -529,7 +529,7 @@ L$loop:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 48(%rsi),%r10d movl 48(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
@ -538,7 +538,7 @@ L$loop:
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 12(%rsi),%r10d movl 12(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -547,7 +547,7 @@ L$loop:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 40(%rsi),%r10d movl 40(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -556,7 +556,7 @@ L$loop:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 4(%rsi),%r10d movl 4(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -565,7 +565,7 @@ L$loop:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 32(%rsi),%r10d movl 32(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
@ -574,7 +574,7 @@ L$loop:
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 60(%rsi),%r10d movl 60(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -583,7 +583,7 @@ L$loop:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 24(%rsi),%r10d movl 24(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -592,7 +592,7 @@ L$loop:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 52(%rsi),%r10d movl 52(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -601,7 +601,7 @@ L$loop:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 16(%rsi),%r10d movl 16(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx
@ -610,7 +610,7 @@ L$loop:
xorl %ecx,%r11d xorl %ecx,%r11d
addl %r11d,%eax addl %r11d,%eax
movl 44(%rsi),%r10d movl 44(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $6,%eax roll $6,%eax
xorl %ecx,%r11d xorl %ecx,%r11d
addl %ebx,%eax addl %ebx,%eax
@ -619,7 +619,7 @@ L$loop:
xorl %ebx,%r11d xorl %ebx,%r11d
addl %r11d,%edx addl %r11d,%edx
movl 8(%rsi),%r10d movl 8(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $10,%edx roll $10,%edx
xorl %ebx,%r11d xorl %ebx,%r11d
addl %eax,%edx addl %eax,%edx
@ -628,7 +628,7 @@ L$loop:
xorl %eax,%r11d xorl %eax,%r11d
addl %r11d,%ecx addl %r11d,%ecx
movl 36(%rsi),%r10d movl 36(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $15,%ecx roll $15,%ecx
xorl %eax,%r11d xorl %eax,%r11d
addl %edx,%ecx addl %edx,%ecx
@ -637,7 +637,7 @@ L$loop:
xorl %edx,%r11d xorl %edx,%r11d
addl %r11d,%ebx addl %r11d,%ebx
movl 0(%rsi),%r10d movl 0(%rsi),%r10d
movl $4294967295,%r11d movl $0xffffffff,%r11d
roll $21,%ebx roll $21,%ebx
xorl %edx,%r11d xorl %edx,%r11d
addl %ecx,%ebx addl %ecx,%ebx


@ -22,14 +22,14 @@ L$gmult_prologue:
movq $14,%rcx movq $14,%rcx
movq 8(%rsi,%rax,1),%r8 movq 8(%rsi,%rax,1),%r8
movq (%rsi,%rax,1),%r9 movq (%rsi,%rax,1),%r9
andb $240,%bl andb $0xf0,%bl
movq %r8,%rdx movq %r8,%rdx
jmp L$oop1 jmp L$oop1
.p2align 4 .p2align 4
L$oop1: L$oop1:
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
movb (%rdi,%rcx,1),%al movb (%rdi,%rcx,1),%al
shrq $4,%r9 shrq $4,%r9
@ -45,13 +45,13 @@ L$oop1:
js L$break1 js L$break1
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
shrq $4,%r9 shrq $4,%r9
xorq 8(%rsi,%rax,1),%r8 xorq 8(%rsi,%rax,1),%r8
shlq $60,%r10 shlq $60,%r10
xorq (%rsi,%rax,1),%r9 xorq (%rsi,%rax,1),%r9
andb $240,%bl andb $0xf0,%bl
xorq (%r11,%rdx,8),%r9 xorq (%r11,%rdx,8),%r9
movq %r8,%rdx movq %r8,%rdx
xorq %r10,%r8 xorq %r10,%r8
@ -60,19 +60,19 @@ L$oop1:
.p2align 4 .p2align 4
L$break1: L$break1:
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
shrq $4,%r9 shrq $4,%r9
xorq 8(%rsi,%rax,1),%r8 xorq 8(%rsi,%rax,1),%r8
shlq $60,%r10 shlq $60,%r10
xorq (%rsi,%rax,1),%r9 xorq (%rsi,%rax,1),%r9
andb $240,%bl andb $0xf0,%bl
xorq (%r11,%rdx,8),%r9 xorq (%r11,%rdx,8),%r9
movq %r8,%rdx movq %r8,%rdx
xorq %r10,%r8 xorq %r10,%r8
shrq $4,%r8 shrq $4,%r8
andq $15,%rdx andq $0xf,%rdx
movq %r9,%r10 movq %r9,%r10
shrq $4,%r9 shrq $4,%r9
xorq 8(%rsi,%rbx,1),%r8 xorq 8(%rsi,%rbx,1),%r8
@ -880,20 +880,20 @@ L$_ghash_clmul:
movdqu 32(%rsi),%xmm7 movdqu 32(%rsi),%xmm7
.byte 102,65,15,56,0,194 .byte 102,65,15,56,0,194
subq $16,%rcx subq $0x10,%rcx
jz L$odd_tail jz L$odd_tail
movdqu 16(%rsi),%xmm6 movdqu 16(%rsi),%xmm6
movl _OPENSSL_ia32cap_P+4(%rip),%eax movl _OPENSSL_ia32cap_P+4(%rip),%eax
cmpq $48,%rcx cmpq $0x30,%rcx
jb L$skip4x jb L$skip4x
andl $71303168,%eax andl $71303168,%eax
cmpl $4194304,%eax cmpl $4194304,%eax
je L$skip4x je L$skip4x
subq $48,%rcx subq $0x30,%rcx
movq $11547335547999543296,%rax movq $0xA040608020C0E000,%rax
movdqu 48(%rsi),%xmm14 movdqu 48(%rsi),%xmm14
movdqu 64(%rsi),%xmm15 movdqu 64(%rsi),%xmm15
@ -940,7 +940,7 @@ L$_ghash_clmul:
xorps %xmm13,%xmm5 xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx leaq 64(%rdx),%rdx
subq $64,%rcx subq $0x40,%rcx
jc L$tail4x jc L$tail4x
jmp L$mod4_loop jmp L$mod4_loop
@ -1023,7 +1023,7 @@ L$mod4_loop:
xorps %xmm13,%xmm5 xorps %xmm13,%xmm5
leaq 64(%rdx),%rdx leaq 64(%rdx),%rdx
subq $64,%rcx subq $0x40,%rcx
jnc L$mod4_loop jnc L$mod4_loop
L$tail4x: L$tail4x:
@ -1067,10 +1067,10 @@ L$tail4x:
pxor %xmm4,%xmm0 pxor %xmm4,%xmm0
psrlq $1,%xmm0 psrlq $1,%xmm0
pxor %xmm1,%xmm0 pxor %xmm1,%xmm0
addq $64,%rcx addq $0x40,%rcx
jz L$done jz L$done
movdqu 32(%rsi),%xmm7 movdqu 32(%rsi),%xmm7
subq $16,%rcx subq $0x10,%rcx
jz L$odd_tail jz L$odd_tail
L$skip4x: L$skip4x:
@ -1093,7 +1093,7 @@ L$skip4x:
leaq 32(%rdx),%rdx leaq 32(%rdx),%rdx
nop nop
subq $32,%rcx subq $0x20,%rcx
jbe L$even_tail jbe L$even_tail
nop nop
jmp L$mod_loop jmp L$mod_loop
@ -1156,7 +1156,7 @@ L$mod_loop:
.byte 102,15,58,68,231,0 .byte 102,15,58,68,231,0
pxor %xmm1,%xmm0 pxor %xmm1,%xmm0
subq $32,%rcx subq $0x20,%rcx
ja L$mod_loop ja L$mod_loop
L$even_tail: L$even_tail:

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,126 @@
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Rolls third_party/boringssl/src in DEPS and updates generated build files."""

import os
import os.path
import shutil
import subprocess
import sys

SCRIPT_PATH = os.path.abspath(__file__)
SRC_PATH = os.path.dirname(os.path.dirname(os.path.dirname(SCRIPT_PATH)))
DEPS_PATH = os.path.join(SRC_PATH, 'DEPS')
BORINGSSL_PATH = os.path.join(SRC_PATH, 'third_party', 'boringssl')
BORINGSSL_SRC_PATH = os.path.join(BORINGSSL_PATH, 'src')

if not os.path.isfile(DEPS_PATH) or not os.path.isdir(BORINGSSL_SRC_PATH):
  raise Exception('Could not find Chromium checkout')

# Pull OS_ARCH_COMBOS out of the BoringSSL script.
sys.path.append(os.path.join(BORINGSSL_SRC_PATH, 'util'))
import generate_build_files

GENERATED_FILES = [
    'BUILD.generated.gni',
    'BUILD.generated_tests.gni',
    'boringssl.gypi',
    'boringssl_tests.gypi',
    'err_data.c',
]


def IsPristine(repo):
  """Returns True if a git checkout is pristine."""
  cmd = ['git', 'diff', '--ignore-submodules']
  return not (subprocess.check_output(cmd, cwd=repo).strip() or
              subprocess.check_output(cmd + ['--cached'], cwd=repo).strip())


def RevParse(repo, rev):
  """Resolves a string to a git commit."""
  return subprocess.check_output(['git', 'rev-parse', rev], cwd=repo).strip()


def UpdateDEPS(deps, from_hash, to_hash):
  """Updates all references of |from_hash| to |to_hash| in |deps|."""
  with open(deps, 'rb') as f:
    contents = f.read()
  if from_hash not in contents:
    raise Exception('%s not in DEPS' % from_hash)
  contents = contents.replace(from_hash, to_hash)
  with open(deps, 'wb') as f:
    f.write(contents)


def main():
  if len(sys.argv) > 2:
    sys.stderr.write('Usage: %s [COMMIT]' % sys.argv[0])
    return 1

  if not IsPristine(SRC_PATH):
    print >>sys.stderr, 'Chromium checkout not pristine.'
    return 0
  if not IsPristine(BORINGSSL_SRC_PATH):
    print >>sys.stderr, 'BoringSSL checkout not pristine.'
    return 0

  if len(sys.argv) > 1:
    commit = RevParse(BORINGSSL_SRC_PATH, sys.argv[1])
  else:
    subprocess.check_call(['git', 'fetch', 'origin'], cwd=BORINGSSL_SRC_PATH)
    commit = RevParse(BORINGSSL_SRC_PATH, 'origin/master')

  head = RevParse(BORINGSSL_SRC_PATH, 'HEAD')
  if head == commit:
    print 'BoringSSL already up to date.'
    return 0

  print 'Rolling BoringSSL from %s to %s...' % (head, commit)

  UpdateDEPS(DEPS_PATH, head, commit)

  # Checkout third_party/boringssl/src to generate new files.
  subprocess.check_call(['git', 'checkout', commit], cwd=BORINGSSL_SRC_PATH)

  # Clear the old generated files.
  for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS:
    path = os.path.join(BORINGSSL_PATH, osname + '-' + arch)
    shutil.rmtree(path)
  for file in GENERATED_FILES:
    path = os.path.join(BORINGSSL_PATH, file)
    os.unlink(path)

  # Generate new ones.
  subprocess.check_call(['python',
                         os.path.join(BORINGSSL_SRC_PATH, 'util',
                                      'generate_build_files.py'),
                         'gn', 'gyp'],
                        cwd=BORINGSSL_PATH)

  # Commit everything.
  subprocess.check_call(['git', 'add', DEPS_PATH], cwd=SRC_PATH)
  for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS:
    path = os.path.join(BORINGSSL_PATH, osname + '-' + arch)
    subprocess.check_call(['git', 'add', path], cwd=SRC_PATH)
  for file in GENERATED_FILES:
    path = os.path.join(BORINGSSL_PATH, file)
    subprocess.check_call(['git', 'add', path], cwd=SRC_PATH)

  message = """Roll src/third_party/boringssl/src %s..%s

https://boringssl.googlesource.com/boringssl/+log/%s..%s

BUG=none
""" % (head[:9], commit[:9], head, commit)
  subprocess.check_call(['git', 'commit', '-m', message], cwd=SRC_PATH)

  return 0


if __name__ == '__main__':
  sys.exit(main())
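A brief usage sketch of the roll script (the checkout-relative path below is an assumption for illustration, not taken from this change): it is run from a pristine checkout, with no argument to roll to BoringSSL origin/master, or with an explicit commit to pin the roll.

    # Assumed location of the script; adjust to wherever it is checked in.
    python tools/roll_boringssl.py
    # Roll to a specific upstream BoringSSL commit instead of origin/master.
    python tools/roll_boringssl.py <commit>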


@ -0,0 +1,977 @@
%ifidn __OUTPUT_FORMAT__,obj
section code use32 class=code align=64
%elifidn __OUTPUT_FORMAT__,win32
%ifdef __YASM_VERSION_ID__
%if __YASM_VERSION_ID__ < 01010000h
%error yasm version 1.1.0 or later needed.
%endif
; Yasm automatically includes .00 and complains about redefining it.
; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html
%else
$@feat.00 equ 1
%endif
section .text code align=64
%else
section .text code
%endif
global _ChaCha20_ctr32
align 16
_ChaCha20_ctr32:
L$_ChaCha20_ctr32_begin:
push ebp
push ebx
push esi
push edi
xor eax,eax
cmp eax,DWORD [28+esp]
je NEAR L$000no_data
call L$pic_point
L$pic_point:
pop eax
lea ebp,[_OPENSSL_ia32cap_P]
test DWORD [ebp],16777216
jz NEAR L$001x86
test DWORD [4+ebp],512
jz NEAR L$001x86
jmp NEAR L$ssse3_shortcut
L$001x86:
mov esi,DWORD [32+esp]
mov edi,DWORD [36+esp]
sub esp,132
mov eax,DWORD [esi]
mov ebx,DWORD [4+esi]
mov ecx,DWORD [8+esi]
mov edx,DWORD [12+esi]
mov DWORD [80+esp],eax
mov DWORD [84+esp],ebx
mov DWORD [88+esp],ecx
mov DWORD [92+esp],edx
mov eax,DWORD [16+esi]
mov ebx,DWORD [20+esi]
mov ecx,DWORD [24+esi]
mov edx,DWORD [28+esi]
mov DWORD [96+esp],eax
mov DWORD [100+esp],ebx
mov DWORD [104+esp],ecx
mov DWORD [108+esp],edx
mov eax,DWORD [edi]
mov ebx,DWORD [4+edi]
mov ecx,DWORD [8+edi]
mov edx,DWORD [12+edi]
sub eax,1
mov DWORD [112+esp],eax
mov DWORD [116+esp],ebx
mov DWORD [120+esp],ecx
mov DWORD [124+esp],edx
jmp NEAR L$002entry
align 16
L$003outer_loop:
mov DWORD [156+esp],ebx
mov DWORD [152+esp],eax
mov DWORD [160+esp],ecx
L$002entry:
mov eax,1634760805
mov DWORD [4+esp],857760878
mov DWORD [8+esp],2036477234
mov DWORD [12+esp],1797285236
mov ebx,DWORD [84+esp]
mov ebp,DWORD [88+esp]
mov ecx,DWORD [104+esp]
mov esi,DWORD [108+esp]
mov edx,DWORD [116+esp]
mov edi,DWORD [120+esp]
mov DWORD [20+esp],ebx
mov DWORD [24+esp],ebp
mov DWORD [40+esp],ecx
mov DWORD [44+esp],esi
mov DWORD [52+esp],edx
mov DWORD [56+esp],edi
mov ebx,DWORD [92+esp]
mov edi,DWORD [124+esp]
mov edx,DWORD [112+esp]
mov ebp,DWORD [80+esp]
mov ecx,DWORD [96+esp]
mov esi,DWORD [100+esp]
add edx,1
mov DWORD [28+esp],ebx
mov DWORD [60+esp],edi
mov DWORD [112+esp],edx
mov ebx,10
jmp NEAR L$004loop
align 16
L$004loop:
add eax,ebp
mov DWORD [128+esp],ebx
mov ebx,ebp
xor edx,eax
rol edx,16
add ecx,edx
xor ebx,ecx
mov edi,DWORD [52+esp]
rol ebx,12
mov ebp,DWORD [20+esp]
add eax,ebx
xor edx,eax
mov DWORD [esp],eax
rol edx,8
mov eax,DWORD [4+esp]
add ecx,edx
mov DWORD [48+esp],edx
xor ebx,ecx
add eax,ebp
rol ebx,7
xor edi,eax
mov DWORD [32+esp],ecx
rol edi,16
mov DWORD [16+esp],ebx
add esi,edi
mov ecx,DWORD [40+esp]
xor ebp,esi
mov edx,DWORD [56+esp]
rol ebp,12
mov ebx,DWORD [24+esp]
add eax,ebp
xor edi,eax
mov DWORD [4+esp],eax
rol edi,8
mov eax,DWORD [8+esp]
add esi,edi
mov DWORD [52+esp],edi
xor ebp,esi
add eax,ebx
rol ebp,7
xor edx,eax
mov DWORD [36+esp],esi
rol edx,16
mov DWORD [20+esp],ebp
add ecx,edx
mov esi,DWORD [44+esp]
xor ebx,ecx
mov edi,DWORD [60+esp]
rol ebx,12
mov ebp,DWORD [28+esp]
add eax,ebx
xor edx,eax
mov DWORD [8+esp],eax
rol edx,8
mov eax,DWORD [12+esp]
add ecx,edx
mov DWORD [56+esp],edx
xor ebx,ecx
add eax,ebp
rol ebx,7
xor edi,eax
rol edi,16
mov DWORD [24+esp],ebx
add esi,edi
xor ebp,esi
rol ebp,12
mov ebx,DWORD [20+esp]
add eax,ebp
xor edi,eax
mov DWORD [12+esp],eax
rol edi,8
mov eax,DWORD [esp]
add esi,edi
mov edx,edi
xor ebp,esi
add eax,ebx
rol ebp,7
xor edx,eax
rol edx,16
mov DWORD [28+esp],ebp
add ecx,edx
xor ebx,ecx
mov edi,DWORD [48+esp]
rol ebx,12
mov ebp,DWORD [24+esp]
add eax,ebx
xor edx,eax
mov DWORD [esp],eax
rol edx,8
mov eax,DWORD [4+esp]
add ecx,edx
mov DWORD [60+esp],edx
xor ebx,ecx
add eax,ebp
rol ebx,7
xor edi,eax
mov DWORD [40+esp],ecx
rol edi,16
mov DWORD [20+esp],ebx
add esi,edi
mov ecx,DWORD [32+esp]
xor ebp,esi
mov edx,DWORD [52+esp]
rol ebp,12
mov ebx,DWORD [28+esp]
add eax,ebp
xor edi,eax
mov DWORD [4+esp],eax
rol edi,8
mov eax,DWORD [8+esp]
add esi,edi
mov DWORD [48+esp],edi
xor ebp,esi
add eax,ebx
rol ebp,7
xor edx,eax
mov DWORD [44+esp],esi
rol edx,16
mov DWORD [24+esp],ebp
add ecx,edx
mov esi,DWORD [36+esp]
xor ebx,ecx
mov edi,DWORD [56+esp]
rol ebx,12
mov ebp,DWORD [16+esp]
add eax,ebx
xor edx,eax
mov DWORD [8+esp],eax
rol edx,8
mov eax,DWORD [12+esp]
add ecx,edx
mov DWORD [52+esp],edx
xor ebx,ecx
add eax,ebp
rol ebx,7
xor edi,eax
rol edi,16
mov DWORD [28+esp],ebx
add esi,edi
xor ebp,esi
mov edx,DWORD [48+esp]
rol ebp,12
mov ebx,DWORD [128+esp]
add eax,ebp
xor edi,eax
mov DWORD [12+esp],eax
rol edi,8
mov eax,DWORD [esp]
add esi,edi
mov DWORD [56+esp],edi
xor ebp,esi
rol ebp,7
dec ebx
jnz NEAR L$004loop
mov ebx,DWORD [160+esp]
add eax,1634760805
add ebp,DWORD [80+esp]
add ecx,DWORD [96+esp]
add esi,DWORD [100+esp]
cmp ebx,64
jb NEAR L$005tail
mov ebx,DWORD [156+esp]
add edx,DWORD [112+esp]
add edi,DWORD [120+esp]
xor eax,DWORD [ebx]
xor ebp,DWORD [16+ebx]
mov DWORD [esp],eax
mov eax,DWORD [152+esp]
xor ecx,DWORD [32+ebx]
xor esi,DWORD [36+ebx]
xor edx,DWORD [48+ebx]
xor edi,DWORD [56+ebx]
mov DWORD [16+eax],ebp
mov DWORD [32+eax],ecx
mov DWORD [36+eax],esi
mov DWORD [48+eax],edx
mov DWORD [56+eax],edi
mov ebp,DWORD [4+esp]
mov ecx,DWORD [8+esp]
mov esi,DWORD [12+esp]
mov edx,DWORD [20+esp]
mov edi,DWORD [24+esp]
add ebp,857760878
add ecx,2036477234
add esi,1797285236
add edx,DWORD [84+esp]
add edi,DWORD [88+esp]
xor ebp,DWORD [4+ebx]
xor ecx,DWORD [8+ebx]
xor esi,DWORD [12+ebx]
xor edx,DWORD [20+ebx]
xor edi,DWORD [24+ebx]
mov DWORD [4+eax],ebp
mov DWORD [8+eax],ecx
mov DWORD [12+eax],esi
mov DWORD [20+eax],edx
mov DWORD [24+eax],edi
mov ebp,DWORD [28+esp]
mov ecx,DWORD [40+esp]
mov esi,DWORD [44+esp]
mov edx,DWORD [52+esp]
mov edi,DWORD [60+esp]
add ebp,DWORD [92+esp]
add ecx,DWORD [104+esp]
add esi,DWORD [108+esp]
add edx,DWORD [116+esp]
add edi,DWORD [124+esp]
xor ebp,DWORD [28+ebx]
xor ecx,DWORD [40+ebx]
xor esi,DWORD [44+ebx]
xor edx,DWORD [52+ebx]
xor edi,DWORD [60+ebx]
lea ebx,[64+ebx]
mov DWORD [28+eax],ebp
mov ebp,DWORD [esp]
mov DWORD [40+eax],ecx
mov ecx,DWORD [160+esp]
mov DWORD [44+eax],esi
mov DWORD [52+eax],edx
mov DWORD [60+eax],edi
mov DWORD [eax],ebp
lea eax,[64+eax]
sub ecx,64
jnz NEAR L$003outer_loop
jmp NEAR L$006done
L$005tail:
add edx,DWORD [112+esp]
add edi,DWORD [120+esp]
mov DWORD [esp],eax
mov DWORD [16+esp],ebp
mov DWORD [32+esp],ecx
mov DWORD [36+esp],esi
mov DWORD [48+esp],edx
mov DWORD [56+esp],edi
mov ebp,DWORD [4+esp]
mov ecx,DWORD [8+esp]
mov esi,DWORD [12+esp]
mov edx,DWORD [20+esp]
mov edi,DWORD [24+esp]
add ebp,857760878
add ecx,2036477234
add esi,1797285236
add edx,DWORD [84+esp]
add edi,DWORD [88+esp]
mov DWORD [4+esp],ebp
mov DWORD [8+esp],ecx
mov DWORD [12+esp],esi
mov DWORD [20+esp],edx
mov DWORD [24+esp],edi
mov ebp,DWORD [28+esp]
mov ecx,DWORD [40+esp]
mov esi,DWORD [44+esp]
mov edx,DWORD [52+esp]
mov edi,DWORD [60+esp]
add ebp,DWORD [92+esp]
add ecx,DWORD [104+esp]
add esi,DWORD [108+esp]
add edx,DWORD [116+esp]
add edi,DWORD [124+esp]
mov DWORD [28+esp],ebp
mov ebp,DWORD [156+esp]
mov DWORD [40+esp],ecx
mov ecx,DWORD [152+esp]
mov DWORD [44+esp],esi
xor esi,esi
mov DWORD [52+esp],edx
mov DWORD [60+esp],edi
xor eax,eax
xor edx,edx
L$007tail_loop:
mov al,BYTE [ebp*1+esi]
mov dl,BYTE [esi*1+esp]
lea esi,[1+esi]
xor al,dl
mov BYTE [esi*1+ecx-1],al
dec ebx
jnz NEAR L$007tail_loop
L$006done:
add esp,132
L$000no_data:
pop edi
pop esi
pop ebx
pop ebp
ret
global _ChaCha20_ssse3
align 16
_ChaCha20_ssse3:
L$_ChaCha20_ssse3_begin:
push ebp
push ebx
push esi
push edi
L$ssse3_shortcut:
mov edi,DWORD [20+esp]
mov esi,DWORD [24+esp]
mov ecx,DWORD [28+esp]
mov edx,DWORD [32+esp]
mov ebx,DWORD [36+esp]
mov ebp,esp
sub esp,524
and esp,-64
mov DWORD [512+esp],ebp
lea eax,[(L$ssse3_data-L$pic_point)+eax]
movdqu xmm3,[ebx]
cmp ecx,256
jb NEAR L$0081x
mov DWORD [516+esp],edx
mov DWORD [520+esp],ebx
sub ecx,256
lea ebp,[384+esp]
movdqu xmm7,[edx]
pshufd xmm0,xmm3,0
pshufd xmm1,xmm3,85
pshufd xmm2,xmm3,170
pshufd xmm3,xmm3,255
paddd xmm0,[48+eax]
pshufd xmm4,xmm7,0
pshufd xmm5,xmm7,85
psubd xmm0,[64+eax]
pshufd xmm6,xmm7,170
pshufd xmm7,xmm7,255
movdqa [64+ebp],xmm0
movdqa [80+ebp],xmm1
movdqa [96+ebp],xmm2
movdqa [112+ebp],xmm3
movdqu xmm3,[16+edx]
movdqa [ebp-64],xmm4
movdqa [ebp-48],xmm5
movdqa [ebp-32],xmm6
movdqa [ebp-16],xmm7
movdqa xmm7,[32+eax]
lea ebx,[128+esp]
pshufd xmm0,xmm3,0
pshufd xmm1,xmm3,85
pshufd xmm2,xmm3,170
pshufd xmm3,xmm3,255
pshufd xmm4,xmm7,0
pshufd xmm5,xmm7,85
pshufd xmm6,xmm7,170
pshufd xmm7,xmm7,255
movdqa [ebp],xmm0
movdqa [16+ebp],xmm1
movdqa [32+ebp],xmm2
movdqa [48+ebp],xmm3
movdqa [ebp-128],xmm4
movdqa [ebp-112],xmm5
movdqa [ebp-96],xmm6
movdqa [ebp-80],xmm7
lea esi,[128+esi]
lea edi,[128+edi]
jmp NEAR L$009outer_loop
align 16
L$009outer_loop:
movdqa xmm1,[ebp-112]
movdqa xmm2,[ebp-96]
movdqa xmm3,[ebp-80]
movdqa xmm5,[ebp-48]
movdqa xmm6,[ebp-32]
movdqa xmm7,[ebp-16]
movdqa [ebx-112],xmm1
movdqa [ebx-96],xmm2
movdqa [ebx-80],xmm3
movdqa [ebx-48],xmm5
movdqa [ebx-32],xmm6
movdqa [ebx-16],xmm7
movdqa xmm2,[32+ebp]
movdqa xmm3,[48+ebp]
movdqa xmm4,[64+ebp]
movdqa xmm5,[80+ebp]
movdqa xmm6,[96+ebp]
movdqa xmm7,[112+ebp]
paddd xmm4,[64+eax]
movdqa [32+ebx],xmm2
movdqa [48+ebx],xmm3
movdqa [64+ebx],xmm4
movdqa [80+ebx],xmm5
movdqa [96+ebx],xmm6
movdqa [112+ebx],xmm7
movdqa [64+ebp],xmm4
movdqa xmm0,[ebp-128]
movdqa xmm6,xmm4
movdqa xmm3,[ebp-64]
movdqa xmm4,[ebp]
movdqa xmm5,[16+ebp]
mov edx,10
nop
align 16
L$010loop:
paddd xmm0,xmm3
movdqa xmm2,xmm3
pxor xmm6,xmm0
pshufb xmm6,[eax]
paddd xmm4,xmm6
pxor xmm2,xmm4
movdqa xmm3,[ebx-48]
movdqa xmm1,xmm2
pslld xmm2,12
psrld xmm1,20
por xmm2,xmm1
movdqa xmm1,[ebx-112]
paddd xmm0,xmm2
movdqa xmm7,[80+ebx]
pxor xmm6,xmm0
movdqa [ebx-128],xmm0
pshufb xmm6,[16+eax]
paddd xmm4,xmm6
movdqa [64+ebx],xmm6
pxor xmm2,xmm4
paddd xmm1,xmm3
movdqa xmm0,xmm2
pslld xmm2,7
psrld xmm0,25
pxor xmm7,xmm1
por xmm2,xmm0
movdqa [ebx],xmm4
pshufb xmm7,[eax]
movdqa [ebx-64],xmm2
paddd xmm5,xmm7
movdqa xmm4,[32+ebx]
pxor xmm3,xmm5
movdqa xmm2,[ebx-32]
movdqa xmm0,xmm3
pslld xmm3,12
psrld xmm0,20
por xmm3,xmm0
movdqa xmm0,[ebx-96]
paddd xmm1,xmm3
movdqa xmm6,[96+ebx]
pxor xmm7,xmm1
movdqa [ebx-112],xmm1
pshufb xmm7,[16+eax]
paddd xmm5,xmm7
movdqa [80+ebx],xmm7
pxor xmm3,xmm5
paddd xmm0,xmm2
movdqa xmm1,xmm3
pslld xmm3,7
psrld xmm1,25
pxor xmm6,xmm0
por xmm3,xmm1
movdqa [16+ebx],xmm5
pshufb xmm6,[eax]
movdqa [ebx-48],xmm3
paddd xmm4,xmm6
movdqa xmm5,[48+ebx]
pxor xmm2,xmm4
movdqa xmm3,[ebx-16]
movdqa xmm1,xmm2
pslld xmm2,12
psrld xmm1,20
por xmm2,xmm1
movdqa xmm1,[ebx-80]
paddd xmm0,xmm2
movdqa xmm7,[112+ebx]
pxor xmm6,xmm0
movdqa [ebx-96],xmm0
pshufb xmm6,[16+eax]
paddd xmm4,xmm6
movdqa [96+ebx],xmm6
pxor xmm2,xmm4
paddd xmm1,xmm3
movdqa xmm0,xmm2
pslld xmm2,7
psrld xmm0,25
pxor xmm7,xmm1
por xmm2,xmm0
pshufb xmm7,[eax]
movdqa [ebx-32],xmm2
paddd xmm5,xmm7
pxor xmm3,xmm5
movdqa xmm2,[ebx-48]
movdqa xmm0,xmm3
pslld xmm3,12
psrld xmm0,20
por xmm3,xmm0
movdqa xmm0,[ebx-128]
paddd xmm1,xmm3
pxor xmm7,xmm1
movdqa [ebx-80],xmm1
pshufb xmm7,[16+eax]
paddd xmm5,xmm7
movdqa xmm6,xmm7
pxor xmm3,xmm5
paddd xmm0,xmm2
movdqa xmm1,xmm3
pslld xmm3,7
psrld xmm1,25
pxor xmm6,xmm0
por xmm3,xmm1
pshufb xmm6,[eax]
movdqa [ebx-16],xmm3
paddd xmm4,xmm6
pxor xmm2,xmm4
movdqa xmm3,[ebx-32]
movdqa xmm1,xmm2
pslld xmm2,12
psrld xmm1,20
por xmm2,xmm1
movdqa xmm1,[ebx-112]
paddd xmm0,xmm2
movdqa xmm7,[64+ebx]
pxor xmm6,xmm0
movdqa [ebx-128],xmm0
pshufb xmm6,[16+eax]
paddd xmm4,xmm6
movdqa [112+ebx],xmm6
pxor xmm2,xmm4
paddd xmm1,xmm3
movdqa xmm0,xmm2
pslld xmm2,7
psrld xmm0,25
pxor xmm7,xmm1
por xmm2,xmm0
movdqa [32+ebx],xmm4
pshufb xmm7,[eax]
movdqa [ebx-48],xmm2
paddd xmm5,xmm7
movdqa xmm4,[ebx]
pxor xmm3,xmm5
movdqa xmm2,[ebx-16]
movdqa xmm0,xmm3
pslld xmm3,12
psrld xmm0,20
por xmm3,xmm0
movdqa xmm0,[ebx-96]
paddd xmm1,xmm3
movdqa xmm6,[80+ebx]
pxor xmm7,xmm1
movdqa [ebx-112],xmm1
pshufb xmm7,[16+eax]
paddd xmm5,xmm7
movdqa [64+ebx],xmm7
pxor xmm3,xmm5
paddd xmm0,xmm2
movdqa xmm1,xmm3
pslld xmm3,7
psrld xmm1,25
pxor xmm6,xmm0
por xmm3,xmm1
movdqa [48+ebx],xmm5
pshufb xmm6,[eax]
movdqa [ebx-32],xmm3
paddd xmm4,xmm6
movdqa xmm5,[16+ebx]
pxor xmm2,xmm4
movdqa xmm3,[ebx-64]
movdqa xmm1,xmm2
pslld xmm2,12
psrld xmm1,20
por xmm2,xmm1
movdqa xmm1,[ebx-80]
paddd xmm0,xmm2
movdqa xmm7,[96+ebx]
pxor xmm6,xmm0
movdqa [ebx-96],xmm0
pshufb xmm6,[16+eax]
paddd xmm4,xmm6
movdqa [80+ebx],xmm6
pxor xmm2,xmm4
paddd xmm1,xmm3
movdqa xmm0,xmm2
pslld xmm2,7
psrld xmm0,25
pxor xmm7,xmm1
por xmm2,xmm0
pshufb xmm7,[eax]
movdqa [ebx-16],xmm2
paddd xmm5,xmm7
pxor xmm3,xmm5
movdqa xmm0,xmm3
pslld xmm3,12
psrld xmm0,20
por xmm3,xmm0
movdqa xmm0,[ebx-128]
paddd xmm1,xmm3
movdqa xmm6,[64+ebx]
pxor xmm7,xmm1
movdqa [ebx-80],xmm1
pshufb xmm7,[16+eax]
paddd xmm5,xmm7
movdqa [96+ebx],xmm7
pxor xmm3,xmm5
movdqa xmm1,xmm3
pslld xmm3,7
psrld xmm1,25
por xmm3,xmm1
dec edx
jnz NEAR L$010loop
movdqa [ebx-64],xmm3
movdqa [ebx],xmm4
movdqa [16+ebx],xmm5
movdqa [64+ebx],xmm6
movdqa [96+ebx],xmm7
movdqa xmm1,[ebx-112]
movdqa xmm2,[ebx-96]
movdqa xmm3,[ebx-80]
paddd xmm0,[ebp-128]
paddd xmm1,[ebp-112]
paddd xmm2,[ebp-96]
paddd xmm3,[ebp-80]
movdqa xmm6,xmm0
punpckldq xmm0,xmm1
movdqa xmm7,xmm2
punpckldq xmm2,xmm3
punpckhdq xmm6,xmm1
punpckhdq xmm7,xmm3
movdqa xmm1,xmm0
punpcklqdq xmm0,xmm2
movdqa xmm3,xmm6
punpcklqdq xmm6,xmm7
punpckhqdq xmm1,xmm2
punpckhqdq xmm3,xmm7
movdqu xmm4,[esi-128]
movdqu xmm5,[esi-64]
movdqu xmm2,[esi]
movdqu xmm7,[64+esi]
lea esi,[16+esi]
pxor xmm4,xmm0
movdqa xmm0,[ebx-64]
pxor xmm5,xmm1
movdqa xmm1,[ebx-48]
pxor xmm6,xmm2
movdqa xmm2,[ebx-32]
pxor xmm7,xmm3
movdqa xmm3,[ebx-16]
movdqu [edi-128],xmm4
movdqu [edi-64],xmm5
movdqu [edi],xmm6
movdqu [64+edi],xmm7
lea edi,[16+edi]
paddd xmm0,[ebp-64]
paddd xmm1,[ebp-48]
paddd xmm2,[ebp-32]
paddd xmm3,[ebp-16]
movdqa xmm6,xmm0
punpckldq xmm0,xmm1
movdqa xmm7,xmm2
punpckldq xmm2,xmm3
punpckhdq xmm6,xmm1
punpckhdq xmm7,xmm3
movdqa xmm1,xmm0
punpcklqdq xmm0,xmm2
movdqa xmm3,xmm6
punpcklqdq xmm6,xmm7
punpckhqdq xmm1,xmm2
punpckhqdq xmm3,xmm7
movdqu xmm4,[esi-128]
movdqu xmm5,[esi-64]
movdqu xmm2,[esi]
movdqu xmm7,[64+esi]
lea esi,[16+esi]
pxor xmm4,xmm0
movdqa xmm0,[ebx]
pxor xmm5,xmm1
movdqa xmm1,[16+ebx]
pxor xmm6,xmm2
movdqa xmm2,[32+ebx]
pxor xmm7,xmm3
movdqa xmm3,[48+ebx]
movdqu [edi-128],xmm4
movdqu [edi-64],xmm5
movdqu [edi],xmm6
movdqu [64+edi],xmm7
lea edi,[16+edi]
paddd xmm0,[ebp]
paddd xmm1,[16+ebp]
paddd xmm2,[32+ebp]
paddd xmm3,[48+ebp]
movdqa xmm6,xmm0
punpckldq xmm0,xmm1
movdqa xmm7,xmm2
punpckldq xmm2,xmm3
punpckhdq xmm6,xmm1
punpckhdq xmm7,xmm3
movdqa xmm1,xmm0
punpcklqdq xmm0,xmm2
movdqa xmm3,xmm6
punpcklqdq xmm6,xmm7
punpckhqdq xmm1,xmm2
punpckhqdq xmm3,xmm7
movdqu xmm4,[esi-128]
movdqu xmm5,[esi-64]
movdqu xmm2,[esi]
movdqu xmm7,[64+esi]
lea esi,[16+esi]
pxor xmm4,xmm0
movdqa xmm0,[64+ebx]
pxor xmm5,xmm1
movdqa xmm1,[80+ebx]
pxor xmm6,xmm2
movdqa xmm2,[96+ebx]
pxor xmm7,xmm3
movdqa xmm3,[112+ebx]
movdqu [edi-128],xmm4
movdqu [edi-64],xmm5
movdqu [edi],xmm6
movdqu [64+edi],xmm7
lea edi,[16+edi]
paddd xmm0,[64+ebp]
paddd xmm1,[80+ebp]
paddd xmm2,[96+ebp]
paddd xmm3,[112+ebp]
movdqa xmm6,xmm0
punpckldq xmm0,xmm1
movdqa xmm7,xmm2
punpckldq xmm2,xmm3
punpckhdq xmm6,xmm1
punpckhdq xmm7,xmm3
movdqa xmm1,xmm0
punpcklqdq xmm0,xmm2
movdqa xmm3,xmm6
punpcklqdq xmm6,xmm7
punpckhqdq xmm1,xmm2
punpckhqdq xmm3,xmm7
movdqu xmm4,[esi-128]
movdqu xmm5,[esi-64]
movdqu xmm2,[esi]
movdqu xmm7,[64+esi]
lea esi,[208+esi]
pxor xmm4,xmm0
pxor xmm5,xmm1
pxor xmm6,xmm2
pxor xmm7,xmm3
movdqu [edi-128],xmm4
movdqu [edi-64],xmm5
movdqu [edi],xmm6
movdqu [64+edi],xmm7
lea edi,[208+edi]
sub ecx,256
jnc NEAR L$009outer_loop
add ecx,256
jz NEAR L$011done
mov ebx,DWORD [520+esp]
lea esi,[esi-128]
mov edx,DWORD [516+esp]
lea edi,[edi-128]
movd xmm2,DWORD [64+ebp]
movdqu xmm3,[ebx]
paddd xmm2,[96+eax]
pand xmm3,[112+eax]
por xmm3,xmm2
L$0081x:
movdqa xmm0,[32+eax]
movdqu xmm1,[edx]
movdqu xmm2,[16+edx]
movdqa xmm6,[eax]
movdqa xmm7,[16+eax]
mov DWORD [48+esp],ebp
movdqa [esp],xmm0
movdqa [16+esp],xmm1
movdqa [32+esp],xmm2
movdqa [48+esp],xmm3
mov edx,10
jmp NEAR L$012loop1x
align 16
L$013outer1x:
movdqa xmm3,[80+eax]
movdqa xmm0,[esp]
movdqa xmm1,[16+esp]
movdqa xmm2,[32+esp]
paddd xmm3,[48+esp]
mov edx,10
movdqa [48+esp],xmm3
jmp NEAR L$012loop1x
align 16
L$012loop1x:
paddd xmm0,xmm1
pxor xmm3,xmm0
db 102,15,56,0,222
paddd xmm2,xmm3
pxor xmm1,xmm2
movdqa xmm4,xmm1
psrld xmm1,20
pslld xmm4,12
por xmm1,xmm4
paddd xmm0,xmm1
pxor xmm3,xmm0
db 102,15,56,0,223
paddd xmm2,xmm3
pxor xmm1,xmm2
movdqa xmm4,xmm1
psrld xmm1,25
pslld xmm4,7
por xmm1,xmm4
pshufd xmm2,xmm2,78
pshufd xmm1,xmm1,57
pshufd xmm3,xmm3,147
nop
paddd xmm0,xmm1
pxor xmm3,xmm0
db 102,15,56,0,222
paddd xmm2,xmm3
pxor xmm1,xmm2
movdqa xmm4,xmm1
psrld xmm1,20
pslld xmm4,12
por xmm1,xmm4
paddd xmm0,xmm1
pxor xmm3,xmm0
db 102,15,56,0,223
paddd xmm2,xmm3
pxor xmm1,xmm2
movdqa xmm4,xmm1
psrld xmm1,25
pslld xmm4,7
por xmm1,xmm4
pshufd xmm2,xmm2,78
pshufd xmm1,xmm1,147
pshufd xmm3,xmm3,57
dec edx
jnz NEAR L$012loop1x
paddd xmm0,[esp]
paddd xmm1,[16+esp]
paddd xmm2,[32+esp]
paddd xmm3,[48+esp]
cmp ecx,64
jb NEAR L$014tail
movdqu xmm4,[esi]
movdqu xmm5,[16+esi]
pxor xmm0,xmm4
movdqu xmm4,[32+esi]
pxor xmm1,xmm5
movdqu xmm5,[48+esi]
pxor xmm2,xmm4
pxor xmm3,xmm5
lea esi,[64+esi]
movdqu [edi],xmm0
movdqu [16+edi],xmm1
movdqu [32+edi],xmm2
movdqu [48+edi],xmm3
lea edi,[64+edi]
sub ecx,64
jnz NEAR L$013outer1x
jmp NEAR L$011done
L$014tail:
movdqa [esp],xmm0
movdqa [16+esp],xmm1
movdqa [32+esp],xmm2
movdqa [48+esp],xmm3
xor eax,eax
xor edx,edx
xor ebp,ebp
L$015tail_loop:
mov al,BYTE [ebp*1+esp]
mov dl,BYTE [ebp*1+esi]
lea ebp,[1+ebp]
xor al,dl
mov BYTE [ebp*1+edi-1],al
dec ecx
jnz NEAR L$015tail_loop
L$011done:
mov esp,DWORD [512+esp]
pop edi
pop esi
pop ebx
pop ebp
ret
align 64
L$ssse3_data:
db 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
db 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
dd 1634760805,857760878,2036477234,1797285236
dd 0,1,2,3
dd 4,4,4,4
dd 1,0,0,0
dd 4,0,0,0
dd 0,-1,-1,-1
align 64
db 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
db 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
db 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
db 114,103,62,0
segment .bss
common _OPENSSL_ia32cap_P 16


@ -349,34 +349,5 @@ L$013exit:
pop ebx pop ebx
pop ebp pop ebp
ret ret
global _RC4_options
align 16
_RC4_options:
L$_RC4_options_begin:
call L$016pic_point
L$016pic_point:
pop eax
lea eax,[(L$017opts-L$016pic_point)+eax]
lea edx,[_OPENSSL_ia32cap_P]
mov edx,DWORD [edx]
bt edx,20
jc NEAR L$0181xchar
bt edx,26
jnc NEAR L$019ret
add eax,25
ret
L$0181xchar:
add eax,12
L$019ret:
ret
align 64
L$017opts:
db 114,99,52,40,52,120,44,105,110,116,41,0
db 114,99,52,40,49,120,44,99,104,97,114,41,0
db 114,99,52,40,56,120,44,109,109,120,41,0
db 82,67,52,32,102,111,114,32,120,56,54,44,32,67,82,89
db 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114
db 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
align 64
segment .bss segment .bss
common _OPENSSL_ia32cap_P 16 common _OPENSSL_ia32cap_P 16

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -504,48 +504,104 @@ $L$SEH_begin_rsaz_512_mul_gather4:
push r14 push r14
push r15 push r15
mov r9d,r9d sub rsp,328
sub rsp,128+24 movaps XMMWORD[160+rsp],xmm6
movaps XMMWORD[176+rsp],xmm7
movaps XMMWORD[192+rsp],xmm8
movaps XMMWORD[208+rsp],xmm9
movaps XMMWORD[224+rsp],xmm10
movaps XMMWORD[240+rsp],xmm11
movaps XMMWORD[256+rsp],xmm12
movaps XMMWORD[272+rsp],xmm13
movaps XMMWORD[288+rsp],xmm14
movaps XMMWORD[304+rsp],xmm15
$L$mul_gather4_body: $L$mul_gather4_body:
mov eax,DWORD[64+r9*4+rdx] movd xmm8,r9d
DB 102,72,15,110,199 movdqa xmm1,XMMWORD[(($L$inc+16))]
mov ebx,DWORD[r9*4+rdx] movdqa xmm0,XMMWORD[$L$inc]
DB 102,72,15,110,201
mov QWORD[128+rsp],r8 pshufd xmm8,xmm8,0
movdqa xmm7,xmm1
movdqa xmm2,xmm1
paddd xmm1,xmm0
pcmpeqd xmm0,xmm8
movdqa xmm3,xmm7
paddd xmm2,xmm1
pcmpeqd xmm1,xmm8
movdqa xmm4,xmm7
paddd xmm3,xmm2
pcmpeqd xmm2,xmm8
movdqa xmm5,xmm7
paddd xmm4,xmm3
pcmpeqd xmm3,xmm8
movdqa xmm6,xmm7
paddd xmm5,xmm4
pcmpeqd xmm4,xmm8
paddd xmm6,xmm5
pcmpeqd xmm5,xmm8
paddd xmm7,xmm6
pcmpeqd xmm6,xmm8
pcmpeqd xmm7,xmm8
movdqa xmm8,XMMWORD[rdx]
movdqa xmm9,XMMWORD[16+rdx]
movdqa xmm10,XMMWORD[32+rdx]
movdqa xmm11,XMMWORD[48+rdx]
pand xmm8,xmm0
movdqa xmm12,XMMWORD[64+rdx]
pand xmm9,xmm1
movdqa xmm13,XMMWORD[80+rdx]
pand xmm10,xmm2
movdqa xmm14,XMMWORD[96+rdx]
pand xmm11,xmm3
movdqa xmm15,XMMWORD[112+rdx]
lea rbp,[128+rdx]
pand xmm12,xmm4
pand xmm13,xmm5
pand xmm14,xmm6
pand xmm15,xmm7
por xmm8,xmm10
por xmm9,xmm11
por xmm8,xmm12
por xmm9,xmm13
por xmm8,xmm14
por xmm9,xmm15
por xmm8,xmm9
pshufd xmm9,xmm8,0x4e
por xmm8,xmm9
DB 102,76,15,126,195
mov QWORD[128+rsp],r8
mov QWORD[((128+8))+rsp],rdi
mov QWORD[((128+16))+rsp],rcx
shl rax,32
or rbx,rax
mov rax,QWORD[rsi] mov rax,QWORD[rsi]
mov rcx,QWORD[8+rsi] mov rcx,QWORD[8+rsi]
lea rbp,[128+r9*4+rdx]
mul rbx mul rbx
mov QWORD[rsp],rax mov QWORD[rsp],rax
mov rax,rcx mov rax,rcx
mov r8,rdx mov r8,rdx
mul rbx mul rbx
movd xmm4,DWORD[rbp]
add r8,rax add r8,rax
mov rax,QWORD[16+rsi] mov rax,QWORD[16+rsi]
mov r9,rdx mov r9,rdx
adc r9,0 adc r9,0
mul rbx mul rbx
movd xmm5,DWORD[64+rbp]
add r9,rax add r9,rax
mov rax,QWORD[24+rsi] mov rax,QWORD[24+rsi]
mov r10,rdx mov r10,rdx
adc r10,0 adc r10,0
mul rbx mul rbx
pslldq xmm5,4
add r10,rax add r10,rax
mov rax,QWORD[32+rsi] mov rax,QWORD[32+rsi]
mov r11,rdx mov r11,rdx
adc r11,0 adc r11,0
mul rbx mul rbx
por xmm4,xmm5
add r11,rax add r11,rax
mov rax,QWORD[40+rsi] mov rax,QWORD[40+rsi]
mov r12,rdx mov r12,rdx
@ -558,14 +614,12 @@ DB 102,72,15,110,201
adc r13,0 adc r13,0
mul rbx mul rbx
lea rbp,[128+rbp]
add r13,rax add r13,rax
mov rax,QWORD[56+rsi] mov rax,QWORD[56+rsi]
mov r14,rdx mov r14,rdx
adc r14,0 adc r14,0
mul rbx mul rbx
DB 102,72,15,126,227
add r14,rax add r14,rax
mov rax,QWORD[rsi] mov rax,QWORD[rsi]
mov r15,rdx mov r15,rdx
@ -577,6 +631,35 @@ DB 102,72,15,126,227
ALIGN 32 ALIGN 32
$L$oop_mul_gather: $L$oop_mul_gather:
movdqa xmm8,XMMWORD[rbp]
movdqa xmm9,XMMWORD[16+rbp]
movdqa xmm10,XMMWORD[32+rbp]
movdqa xmm11,XMMWORD[48+rbp]
pand xmm8,xmm0
movdqa xmm12,XMMWORD[64+rbp]
pand xmm9,xmm1
movdqa xmm13,XMMWORD[80+rbp]
pand xmm10,xmm2
movdqa xmm14,XMMWORD[96+rbp]
pand xmm11,xmm3
movdqa xmm15,XMMWORD[112+rbp]
lea rbp,[128+rbp]
pand xmm12,xmm4
pand xmm13,xmm5
pand xmm14,xmm6
pand xmm15,xmm7
por xmm8,xmm10
por xmm9,xmm11
por xmm8,xmm12
por xmm9,xmm13
por xmm8,xmm14
por xmm9,xmm15
por xmm8,xmm9
pshufd xmm9,xmm8,0x4e
por xmm8,xmm9
DB 102,76,15,126,195
mul rbx mul rbx
add r8,rax add r8,rax
mov rax,QWORD[8+rsi] mov rax,QWORD[8+rsi]
@ -585,7 +668,6 @@ $L$oop_mul_gather:
adc r8,0 adc r8,0
mul rbx mul rbx
movd xmm4,DWORD[rbp]
add r9,rax add r9,rax
mov rax,QWORD[16+rsi] mov rax,QWORD[16+rsi]
adc rdx,0 adc rdx,0
@ -594,7 +676,6 @@ $L$oop_mul_gather:
adc r9,0 adc r9,0
mul rbx mul rbx
movd xmm5,DWORD[64+rbp]
add r10,rax add r10,rax
mov rax,QWORD[24+rsi] mov rax,QWORD[24+rsi]
adc rdx,0 adc rdx,0
@ -603,7 +684,6 @@ $L$oop_mul_gather:
adc r10,0 adc r10,0
mul rbx mul rbx
pslldq xmm5,4
add r11,rax add r11,rax
mov rax,QWORD[32+rsi] mov rax,QWORD[32+rsi]
adc rdx,0 adc rdx,0
@ -612,7 +692,6 @@ $L$oop_mul_gather:
adc r11,0 adc r11,0
mul rbx mul rbx
por xmm4,xmm5
add r12,rax add r12,rax
mov rax,QWORD[40+rsi] mov rax,QWORD[40+rsi]
adc rdx,0 adc rdx,0
@ -637,7 +716,6 @@ $L$oop_mul_gather:
adc r14,0 adc r14,0
mul rbx mul rbx
DB 102,72,15,126,227
add r15,rax add r15,rax
mov rax,QWORD[rsi] mov rax,QWORD[rsi]
adc rdx,0 adc rdx,0
@ -645,7 +723,6 @@ DB 102,72,15,126,227
mov r15,rdx mov r15,rdx
adc r15,0 adc r15,0
lea rbp,[128+rbp]
lea rdi,[8+rdi] lea rdi,[8+rdi]
dec ecx dec ecx
@ -660,8 +737,8 @@ DB 102,72,15,126,227
mov QWORD[48+rdi],r14 mov QWORD[48+rdi],r14
mov QWORD[56+rdi],r15 mov QWORD[56+rdi],r15
DB 102,72,15,126,199 mov rdi,QWORD[((128+8))+rsp]
DB 102,72,15,126,205 mov rbp,QWORD[((128+16))+rsp]
mov r8,QWORD[rsp] mov r8,QWORD[rsp]
mov r9,QWORD[8+rsp] mov r9,QWORD[8+rsp]
@ -686,6 +763,17 @@ DB 102,72,15,126,205
call __rsaz_512_subtract call __rsaz_512_subtract
lea rax,[((128+24+48))+rsp] lea rax,[((128+24+48))+rsp]
movaps xmm6,XMMWORD[((160-200))+rax]
movaps xmm7,XMMWORD[((176-200))+rax]
movaps xmm8,XMMWORD[((192-200))+rax]
movaps xmm9,XMMWORD[((208-200))+rax]
movaps xmm10,XMMWORD[((224-200))+rax]
movaps xmm11,XMMWORD[((240-200))+rax]
movaps xmm12,XMMWORD[((256-200))+rax]
movaps xmm13,XMMWORD[((272-200))+rax]
movaps xmm14,XMMWORD[((288-200))+rax]
movaps xmm15,XMMWORD[((304-200))+rax]
lea rax,[176+rax]
mov r15,QWORD[((-48))+rax] mov r15,QWORD[((-48))+rax]
mov r14,QWORD[((-40))+rax] mov r14,QWORD[((-40))+rax]
mov r13,QWORD[((-32))+rax] mov r13,QWORD[((-32))+rax]
@ -724,7 +812,7 @@ $L$SEH_begin_rsaz_512_mul_scatter4:
mov r9d,r9d mov r9d,r9d
sub rsp,128+24 sub rsp,128+24
$L$mul_scatter4_body: $L$mul_scatter4_body:
lea r8,[r9*4+r8] lea r8,[r9*8+r8]
DB 102,72,15,110,199 DB 102,72,15,110,199
DB 102,72,15,110,202 DB 102,72,15,110,202
DB 102,73,15,110,208 DB 102,73,15,110,208
@ -760,30 +848,14 @@ DB 102,72,15,126,214
call __rsaz_512_subtract call __rsaz_512_subtract
mov DWORD[rsi],r8d mov QWORD[rsi],r8
shr r8,32 mov QWORD[128+rsi],r9
mov DWORD[128+rsi],r9d mov QWORD[256+rsi],r10
shr r9,32 mov QWORD[384+rsi],r11
mov DWORD[256+rsi],r10d mov QWORD[512+rsi],r12
shr r10,32 mov QWORD[640+rsi],r13
mov DWORD[384+rsi],r11d mov QWORD[768+rsi],r14
shr r11,32 mov QWORD[896+rsi],r15
mov DWORD[512+rsi],r12d
shr r12,32
mov DWORD[640+rsi],r13d
shr r13,32
mov DWORD[768+rsi],r14d
shr r14,32
mov DWORD[896+rsi],r15d
shr r15,32
mov DWORD[64+rsi],r8d
mov DWORD[192+rsi],r9d
mov DWORD[320+rsi],r10d
mov DWORD[448+rsi],r11d
mov DWORD[576+rsi],r12d
mov DWORD[704+rsi],r13d
mov DWORD[832+rsi],r14d
mov DWORD[960+rsi],r15d
lea rax,[((128+24+48))+rsp] lea rax,[((128+24+48))+rsp]
mov r15,QWORD[((-48))+rax] mov r15,QWORD[((-48))+rax]
@ -1150,16 +1222,14 @@ global rsaz_512_scatter4
ALIGN 16 ALIGN 16
rsaz_512_scatter4: rsaz_512_scatter4:
lea rcx,[r8*4+rcx] lea rcx,[r8*8+rcx]
mov r9d,8 mov r9d,8
jmp NEAR $L$oop_scatter jmp NEAR $L$oop_scatter
ALIGN 16 ALIGN 16
$L$oop_scatter: $L$oop_scatter:
mov rax,QWORD[rdx] mov rax,QWORD[rdx]
lea rdx,[8+rdx] lea rdx,[8+rdx]
mov DWORD[rcx],eax mov QWORD[rcx],rax
shr rax,32
mov DWORD[64+rcx],eax
lea rcx,[128+rcx] lea rcx,[128+rcx]
dec r9d dec r9d
jnz NEAR $L$oop_scatter jnz NEAR $L$oop_scatter
@ -1170,22 +1240,98 @@ global rsaz_512_gather4
ALIGN 16 ALIGN 16
rsaz_512_gather4: rsaz_512_gather4:
lea rdx,[r8*4+rdx] $L$SEH_begin_rsaz_512_gather4:
DB 0x48,0x81,0xec,0xa8,0x00,0x00,0x00
DB 0x0f,0x29,0x34,0x24
DB 0x0f,0x29,0x7c,0x24,0x10
DB 0x44,0x0f,0x29,0x44,0x24,0x20
DB 0x44,0x0f,0x29,0x4c,0x24,0x30
DB 0x44,0x0f,0x29,0x54,0x24,0x40
DB 0x44,0x0f,0x29,0x5c,0x24,0x50
DB 0x44,0x0f,0x29,0x64,0x24,0x60
DB 0x44,0x0f,0x29,0x6c,0x24,0x70
DB 0x44,0x0f,0x29,0xb4,0x24,0x80,0,0,0
DB 0x44,0x0f,0x29,0xbc,0x24,0x90,0,0,0
movd xmm8,r8d
movdqa xmm1,XMMWORD[(($L$inc+16))]
movdqa xmm0,XMMWORD[$L$inc]
pshufd xmm8,xmm8,0
movdqa xmm7,xmm1
movdqa xmm2,xmm1
paddd xmm1,xmm0
pcmpeqd xmm0,xmm8
movdqa xmm3,xmm7
paddd xmm2,xmm1
pcmpeqd xmm1,xmm8
movdqa xmm4,xmm7
paddd xmm3,xmm2
pcmpeqd xmm2,xmm8
movdqa xmm5,xmm7
paddd xmm4,xmm3
pcmpeqd xmm3,xmm8
movdqa xmm6,xmm7
paddd xmm5,xmm4
pcmpeqd xmm4,xmm8
paddd xmm6,xmm5
pcmpeqd xmm5,xmm8
paddd xmm7,xmm6
pcmpeqd xmm6,xmm8
pcmpeqd xmm7,xmm8
mov r9d,8 mov r9d,8
jmp NEAR $L$oop_gather jmp NEAR $L$oop_gather
ALIGN 16 ALIGN 16
$L$oop_gather: $L$oop_gather:
mov eax,DWORD[rdx] movdqa xmm8,XMMWORD[rdx]
mov r8d,DWORD[64+rdx] movdqa xmm9,XMMWORD[16+rdx]
movdqa xmm10,XMMWORD[32+rdx]
movdqa xmm11,XMMWORD[48+rdx]
pand xmm8,xmm0
movdqa xmm12,XMMWORD[64+rdx]
pand xmm9,xmm1
movdqa xmm13,XMMWORD[80+rdx]
pand xmm10,xmm2
movdqa xmm14,XMMWORD[96+rdx]
pand xmm11,xmm3
movdqa xmm15,XMMWORD[112+rdx]
lea rdx,[128+rdx] lea rdx,[128+rdx]
shl r8,32 pand xmm12,xmm4
or rax,r8 pand xmm13,xmm5
mov QWORD[rcx],rax pand xmm14,xmm6
pand xmm15,xmm7
por xmm8,xmm10
por xmm9,xmm11
por xmm8,xmm12
por xmm9,xmm13
por xmm8,xmm14
por xmm9,xmm15
por xmm8,xmm9
pshufd xmm9,xmm8,0x4e
por xmm8,xmm9
movq QWORD[rcx],xmm8
lea rcx,[8+rcx] lea rcx,[8+rcx]
dec r9d dec r9d
jnz NEAR $L$oop_gather jnz NEAR $L$oop_gather
movaps xmm6,XMMWORD[rsp]
movaps xmm7,XMMWORD[16+rsp]
movaps xmm8,XMMWORD[32+rsp]
movaps xmm9,XMMWORD[48+rsp]
movaps xmm10,XMMWORD[64+rsp]
movaps xmm11,XMMWORD[80+rsp]
movaps xmm12,XMMWORD[96+rsp]
movaps xmm13,XMMWORD[112+rsp]
movaps xmm14,XMMWORD[128+rsp]
movaps xmm15,XMMWORD[144+rsp]
add rsp,0xa8
DB 0F3h,0C3h ;repret DB 0F3h,0C3h ;repret
$L$SEH_end_rsaz_512_gather4:
ALIGN 64
$L$inc:
DD 0,0,1,1
DD 2,2,2,2
EXTERN __imp_RtlVirtualUnwind EXTERN __imp_RtlVirtualUnwind
ALIGN 16 ALIGN 16
@ -1221,6 +1367,18 @@ se_handler:
lea rax,[((128+24+48))+rax] lea rax,[((128+24+48))+rax]
lea rbx,[$L$mul_gather4_epilogue]
cmp rbx,r10
jne NEAR $L$se_not_in_mul_gather4
lea rax,[176+rax]
lea rsi,[((-48-168))+rax]
lea rdi,[512+r8]
mov ecx,20
DD 0xa548f3fc
$L$se_not_in_mul_gather4:
mov rbx,QWORD[((-8))+rax] mov rbx,QWORD[((-8))+rax]
mov rbp,QWORD[((-16))+rax] mov rbp,QWORD[((-16))+rax]
mov r12,QWORD[((-24))+rax] mov r12,QWORD[((-24))+rax]
@ -1296,6 +1454,10 @@ ALIGN 4
DD $L$SEH_end_rsaz_512_mul_by_one wrt ..imagebase DD $L$SEH_end_rsaz_512_mul_by_one wrt ..imagebase
DD $L$SEH_info_rsaz_512_mul_by_one wrt ..imagebase DD $L$SEH_info_rsaz_512_mul_by_one wrt ..imagebase
DD $L$SEH_begin_rsaz_512_gather4 wrt ..imagebase
DD $L$SEH_end_rsaz_512_gather4 wrt ..imagebase
DD $L$SEH_info_rsaz_512_gather4 wrt ..imagebase
section .xdata rdata align=8 section .xdata rdata align=8
ALIGN 8 ALIGN 8
$L$SEH_info_rsaz_512_sqr: $L$SEH_info_rsaz_512_sqr:
@ -1318,3 +1480,16 @@ $L$SEH_info_rsaz_512_mul_by_one:
DB 9,0,0,0 DB 9,0,0,0
DD se_handler wrt ..imagebase DD se_handler wrt ..imagebase
DD $L$mul_by_one_body wrt ..imagebase,$L$mul_by_one_epilogue wrt ..imagebase DD $L$mul_by_one_body wrt ..imagebase,$L$mul_by_one_epilogue wrt ..imagebase
$L$SEH_info_rsaz_512_gather4:
DB 0x01,0x46,0x16,0x00
DB 0x46,0xf8,0x09,0x00
DB 0x3d,0xe8,0x08,0x00
DB 0x34,0xd8,0x07,0x00
DB 0x2e,0xc8,0x06,0x00
DB 0x28,0xb8,0x05,0x00
DB 0x22,0xa8,0x04,0x00
DB 0x1c,0x98,0x03,0x00
DB 0x16,0x88,0x02,0x00
DB 0x10,0x78,0x01,0x00
DB 0x0b,0x68,0x00,0x00
DB 0x07,0x01,0x15,0x00


@ -677,20 +677,20 @@ $L$sqr8x_enter:
lea r11,[((-64))+r9*4+rsp] lea r11,[((-64))+r9*2+rsp]
mov r8,QWORD[r8] mov r8,QWORD[r8]
sub r11,rsi sub r11,rsi
and r11,4095 and r11,4095
cmp r10,r11 cmp r10,r11
jb NEAR $L$sqr8x_sp_alt jb NEAR $L$sqr8x_sp_alt
sub rsp,r11 sub rsp,r11
lea rsp,[((-64))+r9*4+rsp] lea rsp,[((-64))+r9*2+rsp]
jmp NEAR $L$sqr8x_sp_done jmp NEAR $L$sqr8x_sp_done
ALIGN 32 ALIGN 32
$L$sqr8x_sp_alt: $L$sqr8x_sp_alt:
lea r10,[((4096-64))+r9*4] lea r10,[((4096-64))+r9*2]
lea rsp,[((-64))+r9*4+rsp] lea rsp,[((-64))+r9*2+rsp]
sub r11,r10 sub r11,r10
mov r10,0 mov r10,0
cmovc r11,r10 cmovc r11,r10
@ -700,58 +700,80 @@ $L$sqr8x_sp_done:
mov r10,r9 mov r10,r9
neg r9 neg r9
lea r11,[64+r9*2+rsp]
mov QWORD[32+rsp],r8 mov QWORD[32+rsp],r8
mov QWORD[40+rsp],rax mov QWORD[40+rsp],rax
$L$sqr8x_body: $L$sqr8x_body:
mov rbp,r9 DB 102,72,15,110,209
DB 102,73,15,110,211
shr rbp,3+2
mov eax,DWORD[((OPENSSL_ia32cap_P+8))]
jmp NEAR $L$sqr8x_copy_n
ALIGN 32
$L$sqr8x_copy_n:
movq xmm0,QWORD[rcx]
movq xmm1,QWORD[8+rcx]
movq xmm3,QWORD[16+rcx]
movq xmm4,QWORD[24+rcx]
lea rcx,[32+rcx]
movdqa XMMWORD[r11],xmm0
movdqa XMMWORD[16+r11],xmm1
movdqa XMMWORD[32+r11],xmm3
movdqa XMMWORD[48+r11],xmm4
lea r11,[64+r11]
dec rbp
jnz NEAR $L$sqr8x_copy_n
pxor xmm0,xmm0 pxor xmm0,xmm0
DB 102,72,15,110,207 DB 102,72,15,110,207
DB 102,73,15,110,218 DB 102,73,15,110,218
call bn_sqr8x_internal call bn_sqr8x_internal
pxor xmm0,xmm0
lea rax,[48+rsp]
lea rdx,[64+r9*2+rsp]
shr r9,3+2 lea rbx,[r9*1+rdi]
mov rsi,QWORD[40+rsp] mov rcx,r9
jmp NEAR $L$sqr8x_zero mov rdx,r9
DB 102,72,15,126,207
sar rcx,3+2
jmp NEAR $L$sqr8x_sub
ALIGN 32 ALIGN 32
$L$sqr8x_zero: $L$sqr8x_sub:
movdqa XMMWORD[rax],xmm0 mov r12,QWORD[rbx]
movdqa XMMWORD[16+rax],xmm0 mov r13,QWORD[8+rbx]
movdqa XMMWORD[32+rax],xmm0 mov r14,QWORD[16+rbx]
movdqa XMMWORD[48+rax],xmm0 mov r15,QWORD[24+rbx]
lea rax,[64+rax] lea rbx,[32+rbx]
movdqa XMMWORD[rdx],xmm0 sbb r12,QWORD[rbp]
movdqa XMMWORD[16+rdx],xmm0 sbb r13,QWORD[8+rbp]
movdqa XMMWORD[32+rdx],xmm0 sbb r14,QWORD[16+rbp]
movdqa XMMWORD[48+rdx],xmm0 sbb r15,QWORD[24+rbp]
lea rdx,[64+rdx] lea rbp,[32+rbp]
dec r9 mov QWORD[rdi],r12
jnz NEAR $L$sqr8x_zero mov QWORD[8+rdi],r13
mov QWORD[16+rdi],r14
mov QWORD[24+rdi],r15
lea rdi,[32+rdi]
inc rcx
jnz NEAR $L$sqr8x_sub
sbb rax,0
lea rbx,[r9*1+rbx]
lea rdi,[r9*1+rdi]
DB 102,72,15,110,200
pxor xmm0,xmm0
pshufd xmm1,xmm1,0
mov rsi,QWORD[40+rsp]
jmp NEAR $L$sqr8x_cond_copy
ALIGN 32
$L$sqr8x_cond_copy:
movdqa xmm2,XMMWORD[rbx]
movdqa xmm3,XMMWORD[16+rbx]
lea rbx,[32+rbx]
movdqu xmm4,XMMWORD[rdi]
movdqu xmm5,XMMWORD[16+rdi]
lea rdi,[32+rdi]
movdqa XMMWORD[(-32)+rbx],xmm0
movdqa XMMWORD[(-16)+rbx],xmm0
movdqa XMMWORD[(-32)+rdx*1+rbx],xmm0
movdqa XMMWORD[(-16)+rdx*1+rbx],xmm0
pcmpeqd xmm0,xmm1
pand xmm2,xmm1
pand xmm3,xmm1
pand xmm4,xmm0
pand xmm5,xmm0
pxor xmm0,xmm0
por xmm4,xmm2
por xmm5,xmm3
movdqu XMMWORD[(-32)+rdi],xmm4
movdqu XMMWORD[(-16)+rdi],xmm5
add r9,32
jnz NEAR $L$sqr8x_cond_copy
mov rax,1 mov rax,1
mov r15,QWORD[((-48))+rsi] mov r15,QWORD[((-48))+rsi]

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -108,14 +108,14 @@
'source/lib/content_encoding.c', 'source/lib/content_encoding.c',
'source/lib/cookie.c', 'source/lib/cookie.c',
'source/lib/curl_addrinfo.c', 'source/lib/curl_addrinfo.c',
'source/lib/curl_des.c',
'source/lib/curl_endian.c',
'source/lib/curl_fnmatch.c', 'source/lib/curl_fnmatch.c',
'source/lib/curl_gethostname.c', 'source/lib/curl_gethostname.c',
'source/lib/curl_gssapi.c', 'source/lib/curl_gssapi.c',
'source/lib/curl_memrchr.c', 'source/lib/curl_memrchr.c',
'source/lib/curl_multibyte.c', 'source/lib/curl_multibyte.c',
'source/lib/curl_ntlm.c',
'source/lib/curl_ntlm_core.c', 'source/lib/curl_ntlm_core.c',
'source/lib/curl_ntlm_msgs.c',
'source/lib/curl_ntlm_wb.c', 'source/lib/curl_ntlm_wb.c',
'source/lib/curl_rtmp.c', 'source/lib/curl_rtmp.c',
'source/lib/curl_sasl.c', 'source/lib/curl_sasl.c',
@ -137,21 +137,23 @@
'source/lib/hmac.c', 'source/lib/hmac.c',
'source/lib/hostasyn.c', 'source/lib/hostasyn.c',
'source/lib/hostcheck.c', 'source/lib/hostcheck.c',
'source/lib/hostip.c',
'source/lib/hostip4.c', 'source/lib/hostip4.c',
'source/lib/hostip6.c', 'source/lib/hostip6.c',
'source/lib/hostip.c',
'source/lib/hostsyn.c', 'source/lib/hostsyn.c',
'source/lib/http2.c',
'source/lib/http.c', 'source/lib/http.c',
'source/lib/http_chunks.c', 'source/lib/http_chunks.c',
'source/lib/http_digest.c', 'source/lib/http_digest.c',
'source/lib/http_negotiate.c', 'source/lib/http_negotiate.c',
'source/lib/http_negotiate_sspi.c', 'source/lib/http_ntlm.c',
'source/lib/http_proxy.c', 'source/lib/http_proxy.c',
'source/lib/idn_win32.c', 'source/lib/idn_win32.c',
'source/lib/if2ip.c', 'source/lib/if2ip.c',
'source/lib/imap.c', 'source/lib/imap.c',
'source/lib/inet_ntop.c', 'source/lib/inet_ntop.c',
'source/lib/inet_pton.c', 'source/lib/inet_pton.c',
'source/lib/krb5.c',
'source/lib/ldap.c', 'source/lib/ldap.c',
'source/lib/llist.c', 'source/lib/llist.c',
'source/lib/md4.c', 'source/lib/md4.c',
@ -162,6 +164,8 @@
'source/lib/netrc.c', 'source/lib/netrc.c',
'source/lib/non-ascii.c', 'source/lib/non-ascii.c',
'source/lib/nonblock.c', 'source/lib/nonblock.c',
'source/lib/nwlib.c',
'source/lib/nwos.c',
'source/lib/openldap.c', 'source/lib/openldap.c',
'source/lib/parsedate.c', 'source/lib/parsedate.c',
'source/lib/pingpong.c', 'source/lib/pingpong.c',
@ -175,6 +179,7 @@
'source/lib/sendf.c', 'source/lib/sendf.c',
'source/lib/share.c', 'source/lib/share.c',
'source/lib/slist.c', 'source/lib/slist.c',
'source/lib/smb.c',
'source/lib/smtp.c', 'source/lib/smtp.c',
'source/lib/socks.c', 'source/lib/socks.c',
'source/lib/socks_gssapi.c', 'source/lib/socks_gssapi.c',
@ -187,17 +192,31 @@
'source/lib/strerror.c', 'source/lib/strerror.c',
'source/lib/strtok.c', 'source/lib/strtok.c',
'source/lib/strtoofft.c', 'source/lib/strtoofft.c',
'source/lib/system_win32.c',
'source/lib/telnet.c', 'source/lib/telnet.c',
'source/lib/tftp.c', 'source/lib/tftp.c',
'source/lib/timeval.c', 'source/lib/timeval.c',
'source/lib/transfer.c', 'source/lib/transfer.c',
'source/lib/url.c', 'source/lib/url.c',
'source/lib/vauth/cleartext.c',
'source/lib/vauth/cram.c',
'source/lib/vauth/digest.c',
'source/lib/vauth/digest_sspi.c',
'source/lib/vauth/krb5_gssapi.c',
'source/lib/vauth/krb5_sspi.c',
'source/lib/vauth/ntlm.c',
'source/lib/vauth/ntlm_sspi.c',
'source/lib/vauth/oauth2.c',
'source/lib/vauth/spnego_gssapi.c',
'source/lib/vauth/spnego_sspi.c',
'source/lib/vauth/vauth.c',
'source/lib/version.c', 'source/lib/version.c',
'source/lib/vtls/axtls.c', 'source/lib/vtls/axtls.c',
'source/lib/vtls/cyassl.c', 'source/lib/vtls/cyassl.c',
'source/lib/vtls/darwinssl.c', 'source/lib/vtls/darwinssl.c',
'source/lib/vtls/gskit.c', 'source/lib/vtls/gskit.c',
'source/lib/vtls/gtls.c', 'source/lib/vtls/gtls.c',
'source/lib/vtls/mbedtls.c',
'source/lib/vtls/nss.c', 'source/lib/vtls/nss.c',
'source/lib/vtls/openssl.c', 'source/lib/vtls/openssl.c',
'source/lib/vtls/polarssl.c', 'source/lib/vtls/polarssl.c',
@ -206,6 +225,7 @@
'source/lib/vtls/vtls.c', 'source/lib/vtls/vtls.c',
'source/lib/warnless.c', 'source/lib/warnless.c',
'source/lib/wildcard.c', 'source/lib/wildcard.c',
'source/lib/x509asn1.c',
], ],
}, },
{ {